Update godeps for etcd 3.0.4

Timothy St. Clair 2016-07-22 13:54:40 -05:00
parent 456c43c22d
commit 5f008faa8b
457 changed files with 25492 additions and 10481 deletions

Godeps/Godeps.json generated

@ -258,8 +258,8 @@
},
{
"ImportPath": "github.com/boltdb/bolt",
"Comment": "v1.1.0-65-gee4a088",
"Rev": "ee4a0888a9abe7eefe5a0992ca4cb06864839873"
"Comment": "v1.2.1",
"Rev": "dfb21201d9270c1082d5fb0f07f500311ff72f18"
},
{
"ImportPath": "github.com/cloudflare/cfssl/auth",
@ -333,263 +333,263 @@
},
{
"ImportPath": "github.com/coreos/etcd/alarm",
"Comment": "v2.3.0-282-g8b320e7",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751"
"Comment": "v3.0.4",
"Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
},
{
"ImportPath": "github.com/coreos/etcd/auth",
"Comment": "v2.3.0-282-g8b320e7",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751"
"Comment": "v3.0.4",
"Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
},
{
"ImportPath": "github.com/coreos/etcd/auth/authpb",
"Comment": "v2.3.0-282-g8b320e7",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751"
"Comment": "v3.0.4",
"Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
},
{
"ImportPath": "github.com/coreos/etcd/client",
"Comment": "v2.3.0-282-g8b320e7",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751"
"Comment": "v3.0.4",
"Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
},
{
"ImportPath": "github.com/coreos/etcd/clientv3",
"Comment": "v2.3.0-282-g8b320e7",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751"
"Comment": "v3.0.4",
"Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
},
{
"ImportPath": "github.com/coreos/etcd/compactor",
"Comment": "v2.3.0-282-g8b320e7",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751"
"Comment": "v3.0.4",
"Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
},
{
"ImportPath": "github.com/coreos/etcd/discovery",
"Comment": "v2.3.0-282-g8b320e7",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751"
"Comment": "v3.0.4",
"Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
},
{
"ImportPath": "github.com/coreos/etcd/error",
"Comment": "v2.3.0-282-g8b320e7",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751"
"Comment": "v3.0.4",
"Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
},
{
"ImportPath": "github.com/coreos/etcd/etcdserver",
"Comment": "v2.3.0-282-g8b320e7",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751"
"Comment": "v3.0.4",
"Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
},
{
"ImportPath": "github.com/coreos/etcd/etcdserver/api",
"Comment": "v2.3.0-282-g8b320e7",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751"
"Comment": "v3.0.4",
"Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
},
{
"ImportPath": "github.com/coreos/etcd/etcdserver/api/v2http",
"Comment": "v2.3.0-282-g8b320e7",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751"
"Comment": "v3.0.4",
"Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
},
{
"ImportPath": "github.com/coreos/etcd/etcdserver/api/v2http/httptypes",
"Comment": "v2.3.0-282-g8b320e7",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751"
"Comment": "v3.0.4",
"Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
},
{
"ImportPath": "github.com/coreos/etcd/etcdserver/api/v3rpc",
"Comment": "v2.3.0-282-g8b320e7",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751"
"Comment": "v3.0.4",
"Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
},
{
"ImportPath": "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes",
"Comment": "v2.3.0-282-g8b320e7",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751"
"Comment": "v3.0.4",
"Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
},
{
"ImportPath": "github.com/coreos/etcd/etcdserver/auth",
"Comment": "v2.3.0-282-g8b320e7",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751"
"Comment": "v3.0.4",
"Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
},
{
"ImportPath": "github.com/coreos/etcd/etcdserver/etcdserverpb",
"Comment": "v2.3.0-282-g8b320e7",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751"
"Comment": "v3.0.4",
"Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
},
{
"ImportPath": "github.com/coreos/etcd/etcdserver/membership",
"Comment": "v2.3.0-282-g8b320e7",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751"
"Comment": "v3.0.4",
"Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
},
{
"ImportPath": "github.com/coreos/etcd/etcdserver/stats",
"Comment": "v2.3.0-282-g8b320e7",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751"
"Comment": "v3.0.4",
"Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
},
{
"ImportPath": "github.com/coreos/etcd/integration",
"Comment": "v2.3.0-282-g8b320e7",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751"
"Comment": "v3.0.4",
"Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
},
{
"ImportPath": "github.com/coreos/etcd/lease",
"Comment": "v2.3.0-282-g8b320e7",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751"
"Comment": "v3.0.4",
"Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
},
{
"ImportPath": "github.com/coreos/etcd/lease/leasehttp",
"Comment": "v2.3.0-282-g8b320e7",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751"
"Comment": "v3.0.4",
"Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
},
{
"ImportPath": "github.com/coreos/etcd/lease/leasepb",
"Comment": "v2.3.0-282-g8b320e7",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751"
"Comment": "v3.0.4",
"Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
},
{
"ImportPath": "github.com/coreos/etcd/mvcc",
"Comment": "v3.0.4",
"Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
},
{
"ImportPath": "github.com/coreos/etcd/mvcc/backend",
"Comment": "v3.0.4",
"Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
},
{
"ImportPath": "github.com/coreos/etcd/mvcc/mvccpb",
"Comment": "v3.0.4",
"Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
},
{
"ImportPath": "github.com/coreos/etcd/pkg/adt",
"Comment": "v2.3.0-282-g8b320e7",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751"
"Comment": "v3.0.4",
"Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
},
{
"ImportPath": "github.com/coreos/etcd/pkg/contention",
"Comment": "v2.3.0-282-g8b320e7",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751"
"Comment": "v3.0.4",
"Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
},
{
"ImportPath": "github.com/coreos/etcd/pkg/crc",
"Comment": "v2.3.0-282-g8b320e7",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751"
"Comment": "v3.0.4",
"Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
},
{
"ImportPath": "github.com/coreos/etcd/pkg/fileutil",
"Comment": "v2.3.0-282-g8b320e7",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751"
"Comment": "v3.0.4",
"Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
},
{
"ImportPath": "github.com/coreos/etcd/pkg/httputil",
"Comment": "v2.3.0-282-g8b320e7",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751"
"Comment": "v3.0.4",
"Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
},
{
"ImportPath": "github.com/coreos/etcd/pkg/idutil",
"Comment": "v2.3.0-282-g8b320e7",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751"
"Comment": "v3.0.4",
"Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
},
{
"ImportPath": "github.com/coreos/etcd/pkg/ioutil",
"Comment": "v2.3.0-282-g8b320e7",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751"
"Comment": "v3.0.4",
"Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
},
{
"ImportPath": "github.com/coreos/etcd/pkg/logutil",
"Comment": "v2.3.0-282-g8b320e7",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751"
"Comment": "v3.0.4",
"Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
},
{
"ImportPath": "github.com/coreos/etcd/pkg/netutil",
"Comment": "v2.3.0-282-g8b320e7",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751"
"Comment": "v3.0.4",
"Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
},
{
"ImportPath": "github.com/coreos/etcd/pkg/pathutil",
"Comment": "v2.3.0-282-g8b320e7",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751"
"Comment": "v3.0.4",
"Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
},
{
"ImportPath": "github.com/coreos/etcd/pkg/pbutil",
"Comment": "v2.3.0-282-g8b320e7",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751"
"Comment": "v3.0.4",
"Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
},
{
"ImportPath": "github.com/coreos/etcd/pkg/runtime",
"Comment": "v2.3.0-282-g8b320e7",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751"
"Comment": "v3.0.4",
"Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
},
{
"ImportPath": "github.com/coreos/etcd/pkg/schedule",
"Comment": "v2.3.0-282-g8b320e7",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751"
"Comment": "v3.0.4",
"Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
},
{
"ImportPath": "github.com/coreos/etcd/pkg/testutil",
"Comment": "v2.3.0-282-g8b320e7",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751"
"Comment": "v3.0.4",
"Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
},
{
"ImportPath": "github.com/coreos/etcd/pkg/tlsutil",
"Comment": "v2.3.0-282-g8b320e7",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751"
"Comment": "v3.0.4",
"Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
},
{
"ImportPath": "github.com/coreos/etcd/pkg/transport",
"Comment": "v2.3.0-282-g8b320e7",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751"
"Comment": "v3.0.4",
"Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
},
{
"ImportPath": "github.com/coreos/etcd/pkg/types",
"Comment": "v2.3.0-282-g8b320e7",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751"
"Comment": "v3.0.4",
"Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
},
{
"ImportPath": "github.com/coreos/etcd/pkg/wait",
"Comment": "v2.3.0-282-g8b320e7",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751"
"Comment": "v3.0.4",
"Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
},
{
"ImportPath": "github.com/coreos/etcd/raft",
"Comment": "v2.3.0-282-g8b320e7",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751"
"Comment": "v3.0.4",
"Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
},
{
"ImportPath": "github.com/coreos/etcd/raft/raftpb",
"Comment": "v2.3.0-282-g8b320e7",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751"
"Comment": "v3.0.4",
"Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
},
{
"ImportPath": "github.com/coreos/etcd/rafthttp",
"Comment": "v2.3.0-282-g8b320e7",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751"
"Comment": "v3.0.4",
"Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
},
{
"ImportPath": "github.com/coreos/etcd/snap",
"Comment": "v2.3.0-282-g8b320e7",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751"
"Comment": "v3.0.4",
"Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
},
{
"ImportPath": "github.com/coreos/etcd/snap/snappb",
"Comment": "v2.3.0-282-g8b320e7",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751"
},
{
"ImportPath": "github.com/coreos/etcd/storage",
"Comment": "v2.3.0-282-g8b320e7",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751"
},
{
"ImportPath": "github.com/coreos/etcd/storage/backend",
"Comment": "v2.3.0-282-g8b320e7",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751"
},
{
"ImportPath": "github.com/coreos/etcd/storage/storagepb",
"Comment": "v2.3.0-282-g8b320e7",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751"
"Comment": "v3.0.4",
"Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
},
{
"ImportPath": "github.com/coreos/etcd/store",
"Comment": "v2.3.0-282-g8b320e7",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751"
"Comment": "v3.0.4",
"Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
},
{
"ImportPath": "github.com/coreos/etcd/version",
"Comment": "v2.3.0-282-g8b320e7",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751"
"Comment": "v3.0.4",
"Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
},
{
"ImportPath": "github.com/coreos/etcd/wal",
"Comment": "v2.3.0-282-g8b320e7",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751"
"Comment": "v3.0.4",
"Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
},
{
"ImportPath": "github.com/coreos/etcd/wal/walpb",
"Comment": "v2.3.0-282-g8b320e7",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751"
"Comment": "v3.0.4",
"Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
},
{
"ImportPath": "github.com/coreos/go-oidc/http",
@ -613,7 +613,7 @@
},
{
"ImportPath": "github.com/coreos/go-semver/semver",
"Rev": "d043ae190b3202550d026daf009359bb5d761672"
"Rev": "568e959cd89871e61434c1143528d9162da89ef2"
},
{
"ImportPath": "github.com/coreos/go-systemd/activation",
@ -647,33 +647,33 @@
},
{
"ImportPath": "github.com/coreos/pkg/capnslog",
"Comment": "v2",
"Rev": "7f080b6c11ac2d2347c3cd7521e810207ea1a041"
"Comment": "v2-8-gfa29b1d",
"Rev": "fa29b1d70f0beaddd4c7021607cc3c3be8ce94b8"
},
{
"ImportPath": "github.com/coreos/pkg/dlopen",
"Comment": "v2",
"Rev": "7f080b6c11ac2d2347c3cd7521e810207ea1a041"
"Comment": "v2-8-gfa29b1d",
"Rev": "fa29b1d70f0beaddd4c7021607cc3c3be8ce94b8"
},
{
"ImportPath": "github.com/coreos/pkg/health",
"Comment": "v2",
"Rev": "7f080b6c11ac2d2347c3cd7521e810207ea1a041"
"Comment": "v2-8-gfa29b1d",
"Rev": "fa29b1d70f0beaddd4c7021607cc3c3be8ce94b8"
},
{
"ImportPath": "github.com/coreos/pkg/httputil",
"Comment": "v2",
"Rev": "7f080b6c11ac2d2347c3cd7521e810207ea1a041"
"Comment": "v2-8-gfa29b1d",
"Rev": "fa29b1d70f0beaddd4c7021607cc3c3be8ce94b8"
},
{
"ImportPath": "github.com/coreos/pkg/timeutil",
"Comment": "v2",
"Rev": "7f080b6c11ac2d2347c3cd7521e810207ea1a041"
"Comment": "v2-8-gfa29b1d",
"Rev": "fa29b1d70f0beaddd4c7021607cc3c3be8ce94b8"
},
{
"ImportPath": "github.com/coreos/rkt/api/v1alpha",
"Comment": "v1.6.0",
"Rev": "14437382a98e5ebeb6cafb57cff445370e3f7d56"
"Comment": "v1.11.0-59-ga83419b",
"Rev": "a83419be28ac626876f94a28b4df2dbc9eac7448"
},
{
"ImportPath": "github.com/cpuguy83/go-md2man/md2man",
@ -895,123 +895,128 @@
},
{
"ImportPath": "github.com/gogo/protobuf/gogoproto",
"Comment": "v0.1-125-g82d16f7",
"Rev": "82d16f734d6d871204a3feb1a73cb220cc92574c"
"Comment": "v0.2-33-ge18d7aa",
"Rev": "e18d7aa8f8c624c915db340349aad4c49b10d173"
},
{
"ImportPath": "github.com/gogo/protobuf/plugin/compare",
"Comment": "v0.2-33-ge18d7aa",
"Rev": "e18d7aa8f8c624c915db340349aad4c49b10d173"
},
{
"ImportPath": "github.com/gogo/protobuf/plugin/defaultcheck",
"Comment": "v0.1-125-g82d16f7",
"Rev": "82d16f734d6d871204a3feb1a73cb220cc92574c"
"Comment": "v0.2-33-ge18d7aa",
"Rev": "e18d7aa8f8c624c915db340349aad4c49b10d173"
},
{
"ImportPath": "github.com/gogo/protobuf/plugin/description",
"Comment": "v0.1-125-g82d16f7",
"Rev": "82d16f734d6d871204a3feb1a73cb220cc92574c"
"Comment": "v0.2-33-ge18d7aa",
"Rev": "e18d7aa8f8c624c915db340349aad4c49b10d173"
},
{
"ImportPath": "github.com/gogo/protobuf/plugin/embedcheck",
"Comment": "v0.1-125-g82d16f7",
"Rev": "82d16f734d6d871204a3feb1a73cb220cc92574c"
"Comment": "v0.2-33-ge18d7aa",
"Rev": "e18d7aa8f8c624c915db340349aad4c49b10d173"
},
{
"ImportPath": "github.com/gogo/protobuf/plugin/enumstringer",
"Comment": "v0.1-125-g82d16f7",
"Rev": "82d16f734d6d871204a3feb1a73cb220cc92574c"
"Comment": "v0.2-33-ge18d7aa",
"Rev": "e18d7aa8f8c624c915db340349aad4c49b10d173"
},
{
"ImportPath": "github.com/gogo/protobuf/plugin/equal",
"Comment": "v0.1-125-g82d16f7",
"Rev": "82d16f734d6d871204a3feb1a73cb220cc92574c"
"Comment": "v0.2-33-ge18d7aa",
"Rev": "e18d7aa8f8c624c915db340349aad4c49b10d173"
},
{
"ImportPath": "github.com/gogo/protobuf/plugin/face",
"Comment": "v0.1-125-g82d16f7",
"Rev": "82d16f734d6d871204a3feb1a73cb220cc92574c"
"Comment": "v0.2-33-ge18d7aa",
"Rev": "e18d7aa8f8c624c915db340349aad4c49b10d173"
},
{
"ImportPath": "github.com/gogo/protobuf/plugin/gostring",
"Comment": "v0.1-125-g82d16f7",
"Rev": "82d16f734d6d871204a3feb1a73cb220cc92574c"
},
{
"ImportPath": "github.com/gogo/protobuf/plugin/grpc",
"Comment": "v0.1-125-g82d16f7",
"Rev": "82d16f734d6d871204a3feb1a73cb220cc92574c"
"Comment": "v0.2-33-ge18d7aa",
"Rev": "e18d7aa8f8c624c915db340349aad4c49b10d173"
},
{
"ImportPath": "github.com/gogo/protobuf/plugin/marshalto",
"Comment": "v0.1-125-g82d16f7",
"Rev": "82d16f734d6d871204a3feb1a73cb220cc92574c"
"Comment": "v0.2-33-ge18d7aa",
"Rev": "e18d7aa8f8c624c915db340349aad4c49b10d173"
},
{
"ImportPath": "github.com/gogo/protobuf/plugin/oneofcheck",
"Comment": "v0.1-125-g82d16f7",
"Rev": "82d16f734d6d871204a3feb1a73cb220cc92574c"
"Comment": "v0.2-33-ge18d7aa",
"Rev": "e18d7aa8f8c624c915db340349aad4c49b10d173"
},
{
"ImportPath": "github.com/gogo/protobuf/plugin/populate",
"Comment": "v0.1-125-g82d16f7",
"Rev": "82d16f734d6d871204a3feb1a73cb220cc92574c"
"Comment": "v0.2-33-ge18d7aa",
"Rev": "e18d7aa8f8c624c915db340349aad4c49b10d173"
},
{
"ImportPath": "github.com/gogo/protobuf/plugin/size",
"Comment": "v0.1-125-g82d16f7",
"Rev": "82d16f734d6d871204a3feb1a73cb220cc92574c"
"Comment": "v0.2-33-ge18d7aa",
"Rev": "e18d7aa8f8c624c915db340349aad4c49b10d173"
},
{
"ImportPath": "github.com/gogo/protobuf/plugin/stringer",
"Comment": "v0.1-125-g82d16f7",
"Rev": "82d16f734d6d871204a3feb1a73cb220cc92574c"
"Comment": "v0.2-33-ge18d7aa",
"Rev": "e18d7aa8f8c624c915db340349aad4c49b10d173"
},
{
"ImportPath": "github.com/gogo/protobuf/plugin/testgen",
"Comment": "v0.1-125-g82d16f7",
"Rev": "82d16f734d6d871204a3feb1a73cb220cc92574c"
"Comment": "v0.2-33-ge18d7aa",
"Rev": "e18d7aa8f8c624c915db340349aad4c49b10d173"
},
{
"ImportPath": "github.com/gogo/protobuf/plugin/union",
"Comment": "v0.1-125-g82d16f7",
"Rev": "82d16f734d6d871204a3feb1a73cb220cc92574c"
"Comment": "v0.2-33-ge18d7aa",
"Rev": "e18d7aa8f8c624c915db340349aad4c49b10d173"
},
{
"ImportPath": "github.com/gogo/protobuf/plugin/unmarshal",
"Comment": "v0.1-125-g82d16f7",
"Rev": "82d16f734d6d871204a3feb1a73cb220cc92574c"
"Comment": "v0.2-33-ge18d7aa",
"Rev": "e18d7aa8f8c624c915db340349aad4c49b10d173"
},
{
"ImportPath": "github.com/gogo/protobuf/proto",
"Comment": "v0.1-125-g82d16f7",
"Rev": "82d16f734d6d871204a3feb1a73cb220cc92574c"
"Comment": "v0.2-33-ge18d7aa",
"Rev": "e18d7aa8f8c624c915db340349aad4c49b10d173"
},
{
"ImportPath": "github.com/gogo/protobuf/protoc-gen-gogo/descriptor",
"Comment": "v0.1-125-g82d16f7",
"Rev": "82d16f734d6d871204a3feb1a73cb220cc92574c"
"Comment": "v0.2-33-ge18d7aa",
"Rev": "e18d7aa8f8c624c915db340349aad4c49b10d173"
},
{
"ImportPath": "github.com/gogo/protobuf/protoc-gen-gogo/generator",
"Comment": "v0.1-125-g82d16f7",
"Rev": "82d16f734d6d871204a3feb1a73cb220cc92574c"
"Comment": "v0.2-33-ge18d7aa",
"Rev": "e18d7aa8f8c624c915db340349aad4c49b10d173"
},
{
"ImportPath": "github.com/gogo/protobuf/protoc-gen-gogo/grpc",
"Comment": "v0.2-33-ge18d7aa",
"Rev": "e18d7aa8f8c624c915db340349aad4c49b10d173"
},
{
"ImportPath": "github.com/gogo/protobuf/protoc-gen-gogo/plugin",
"Comment": "v0.1-125-g82d16f7",
"Rev": "82d16f734d6d871204a3feb1a73cb220cc92574c"
"Comment": "v0.2-33-ge18d7aa",
"Rev": "e18d7aa8f8c624c915db340349aad4c49b10d173"
},
{
"ImportPath": "github.com/gogo/protobuf/sortkeys",
"Comment": "v0.1-125-g82d16f7",
"Rev": "82d16f734d6d871204a3feb1a73cb220cc92574c"
"Comment": "v0.2-33-ge18d7aa",
"Rev": "e18d7aa8f8c624c915db340349aad4c49b10d173"
},
{
"ImportPath": "github.com/gogo/protobuf/vanity",
"Comment": "v0.1-125-g82d16f7",
"Rev": "82d16f734d6d871204a3feb1a73cb220cc92574c"
"Comment": "v0.2-33-ge18d7aa",
"Rev": "e18d7aa8f8c624c915db340349aad4c49b10d173"
},
{
"ImportPath": "github.com/gogo/protobuf/vanity/command",
"Comment": "v0.1-125-g82d16f7",
"Rev": "82d16f734d6d871204a3feb1a73cb220cc92574c"
"Comment": "v0.2-33-ge18d7aa",
"Rev": "e18d7aa8f8c624c915db340349aad4c49b10d173"
},
{
"ImportPath": "github.com/golang/glog",
@ -1019,19 +1024,23 @@
},
{
"ImportPath": "github.com/golang/groupcache/lru",
"Rev": "604ed5785183e59ae2789449d89e73f3a2a77987"
"Rev": "02826c3e79038b59d737d3b1c0a1d937f71a4433"
},
{
"ImportPath": "github.com/golang/mock/gomock",
"Rev": "bd3c8e81be01eef76d4b503f5e687d2d1354d2d9"
},
{
"ImportPath": "github.com/golang/protobuf/jsonpb",
"Rev": "8616e8ee5e20a1704615e6c8d7afcdac06087a67"
},
{
"ImportPath": "github.com/golang/protobuf/proto",
"Rev": "b982704f8bb716bb608144408cff30e15fbde841"
"Rev": "8616e8ee5e20a1704615e6c8d7afcdac06087a67"
},
{
"ImportPath": "github.com/google/btree",
"Rev": "cc6329d4279e3f025a53a83c397d2339b5705c45"
"Rev": "7d79101e329e5a3adf994758c578dab82b90c017"
},
{
"ImportPath": "github.com/google/cadvisor/api",
@ -1270,6 +1279,21 @@
"ImportPath": "github.com/gorilla/mux",
"Rev": "8096f47503459bcc74d1f4c487b7e6e42e5746b5"
},
{
"ImportPath": "github.com/grpc-ecosystem/grpc-gateway/runtime",
"Comment": "v1.0.0-8-gf52d055",
"Rev": "f52d055dc48aec25854ed7d31862f78913cf17d1"
},
{
"ImportPath": "github.com/grpc-ecosystem/grpc-gateway/runtime/internal",
"Comment": "v1.0.0-8-gf52d055",
"Rev": "f52d055dc48aec25854ed7d31862f78913cf17d1"
},
{
"ImportPath": "github.com/grpc-ecosystem/grpc-gateway/utilities",
"Comment": "v1.0.0-8-gf52d055",
"Rev": "f52d055dc48aec25854ed7d31862f78913cf17d1"
},
{
"ImportPath": "github.com/hashicorp/go-msgpack/codec",
"Rev": "fa3f63826f7c23912c15263591e65d54d080b458"
@ -1351,7 +1375,7 @@
},
{
"ImportPath": "github.com/jonboulle/clockwork",
"Rev": "3f831b65b61282ba6bece21b91beea2edc4c887a"
"Rev": "72f9bd7c4e0c2a40055ab3d0f09654f730cce982"
},
{
"ImportPath": "github.com/juju/ratelimit",
@ -1736,8 +1760,8 @@
},
{
"ImportPath": "github.com/prometheus/client_golang/prometheus",
"Comment": "0.7.0-39-g3b78d7a",
"Rev": "3b78d7a77f51ccbc364d4bc170920153022cfd08"
"Comment": "0.7.0-52-ge51041b",
"Rev": "e51041b3fa41cece0dca035740ba6411905be473"
},
{
"ImportPath": "github.com/prometheus/client_model/go",
@ -1746,19 +1770,15 @@
},
{
"ImportPath": "github.com/prometheus/common/expfmt",
"Rev": "a6ab08426bb262e2d190097751f5cfd1cfdfd17d"
},
{
"ImportPath": "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg",
"Rev": "a6ab08426bb262e2d190097751f5cfd1cfdfd17d"
"Rev": "ffe929a3f4c4faeaa10f2b9535c2b1be3ad15650"
},
{
"ImportPath": "github.com/prometheus/common/model",
"Rev": "a6ab08426bb262e2d190097751f5cfd1cfdfd17d"
"Rev": "ffe929a3f4c4faeaa10f2b9535c2b1be3ad15650"
},
{
"ImportPath": "github.com/prometheus/procfs",
"Rev": "490cc6eb5fa45bf8a8b7b73c8bc82a8160e8531d"
"Rev": "454a56f35412459b5e684fd5ec0f9211b94f002a"
},
{
"ImportPath": "github.com/rackspace/gophercloud",
@ -1927,8 +1947,8 @@
},
{
"ImportPath": "github.com/russross/blackfriday",
"Comment": "v1.2-42-g77efab5",
"Rev": "77efab57b2f74dd3f9051c79752b2e8995c8b789"
"Comment": "v1.4-2-g300106c",
"Rev": "300106c228d52c8941d4b3de6054a6062a86dda3"
},
{
"ImportPath": "github.com/samuel/go-zookeeper/zk",
@ -2004,11 +2024,11 @@
},
{
"ImportPath": "github.com/ugorji/go/codec",
"Rev": "f4485b318aadd133842532f841dc205a8e339d74"
"Rev": "f1f1a805ed361a0e078bb537e4ea78cd37dcf065"
},
{
"ImportPath": "github.com/ugorji/go/codec/codecgen",
"Rev": "f4485b318aadd133842532f841dc205a8e339d74"
"Rev": "f1f1a805ed361a0e078bb537e4ea78cd37dcf065"
},
{
"ImportPath": "github.com/vishvananda/netlink",
@ -2200,7 +2220,7 @@
},
{
"ImportPath": "golang.org/x/sys/unix",
"Rev": "833a04a10549a95dc34458c195cbad61bbb6cb4d"
"Rev": "9c60d1c508f5134d1ca726b4641db998f2523357"
},
{
"ImportPath": "google.golang.org/api/cloudmonitoring/v2beta2",
@ -2240,35 +2260,48 @@
},
{
"ImportPath": "google.golang.org/grpc",
"Rev": "933601d8cd6418a8a891bd9075a7161b0a67badb"
"Comment": "v1.0.0-6-g02fca89",
"Rev": "02fca896ff5f50c6bbbee0860345a49344b37a03"
},
{
"ImportPath": "google.golang.org/grpc/codes",
"Rev": "933601d8cd6418a8a891bd9075a7161b0a67badb"
"Comment": "v1.0.0-6-g02fca89",
"Rev": "02fca896ff5f50c6bbbee0860345a49344b37a03"
},
{
"ImportPath": "google.golang.org/grpc/credentials",
"Rev": "933601d8cd6418a8a891bd9075a7161b0a67badb"
"Comment": "v1.0.0-6-g02fca89",
"Rev": "02fca896ff5f50c6bbbee0860345a49344b37a03"
},
{
"ImportPath": "google.golang.org/grpc/grpclog",
"Rev": "933601d8cd6418a8a891bd9075a7161b0a67badb"
"Comment": "v1.0.0-6-g02fca89",
"Rev": "02fca896ff5f50c6bbbee0860345a49344b37a03"
},
{
"ImportPath": "google.golang.org/grpc/internal",
"Comment": "v1.0.0-6-g02fca89",
"Rev": "02fca896ff5f50c6bbbee0860345a49344b37a03"
},
{
"ImportPath": "google.golang.org/grpc/metadata",
"Rev": "933601d8cd6418a8a891bd9075a7161b0a67badb"
"Comment": "v1.0.0-6-g02fca89",
"Rev": "02fca896ff5f50c6bbbee0860345a49344b37a03"
},
{
"ImportPath": "google.golang.org/grpc/naming",
"Rev": "933601d8cd6418a8a891bd9075a7161b0a67badb"
"Comment": "v1.0.0-6-g02fca89",
"Rev": "02fca896ff5f50c6bbbee0860345a49344b37a03"
},
{
"ImportPath": "google.golang.org/grpc/peer",
"Rev": "933601d8cd6418a8a891bd9075a7161b0a67badb"
"Comment": "v1.0.0-6-g02fca89",
"Rev": "02fca896ff5f50c6bbbee0860345a49344b37a03"
},
{
"ImportPath": "google.golang.org/grpc/transport",
"Rev": "933601d8cd6418a8a891bd9075a7161b0a67badb"
"Comment": "v1.0.0-6-g02fca89",
"Rev": "02fca896ff5f50c6bbbee0860345a49344b37a03"
},
{
"ImportPath": "gopkg.in/gcfg.v1",
@ -2302,7 +2335,7 @@
},
{
"ImportPath": "gopkg.in/yaml.v2",
"Rev": "a83829b6f1293c91addabc89d0571c246397bbf4"
"Rev": "53feefa2559fb8dfa8d81baad31be332c97d6c77"
},
{
"ImportPath": "k8s.io/heapster/metrics/api/v1/types",

Godeps/LICENSES generated

File diff suppressed because it is too large

vendor/github.com/boltdb/bolt/README.md generated vendored

@ -1,4 +1,4 @@
Bolt [![Build Status](https://drone.io/github.com/boltdb/bolt/status.png)](https://drone.io/github.com/boltdb/bolt/latest) [![Coverage Status](https://coveralls.io/repos/boltdb/bolt/badge.svg?branch=master)](https://coveralls.io/r/boltdb/bolt?branch=master) [![GoDoc](https://godoc.org/github.com/boltdb/bolt?status.svg)](https://godoc.org/github.com/boltdb/bolt) ![Version](https://img.shields.io/badge/version-1.0-green.svg)
Bolt [![Coverage Status](https://coveralls.io/repos/boltdb/bolt/badge.svg?branch=master)](https://coveralls.io/r/boltdb/bolt?branch=master) [![GoDoc](https://godoc.org/github.com/boltdb/bolt?status.svg)](https://godoc.org/github.com/boltdb/bolt) ![Version](https://img.shields.io/badge/version-1.0-green.svg)
====
Bolt is a pure Go key/value store inspired by [Howard Chu's][hyc_symas]
@ -427,6 +427,8 @@ db.View(func(tx *bolt.Tx) error {
})
```
Note that, while RFC3339 is sortable, the Golang implementation of RFC3339Nano does not use a fixed number of digits after the decimal point and is therefore not sortable.
#### ForEach()
@ -437,7 +439,7 @@ all the keys in a bucket:
db.View(func(tx *bolt.Tx) error {
// Assume bucket exists and has keys
b := tx.Bucket([]byte("MyBucket"))
b.ForEach(func(k, v []byte) error {
fmt.Printf("key=%s, value=%s\n", k, v)
return nil
@ -617,7 +619,7 @@ Boltmobiledemo.BoltDB boltDB = Boltmobiledemo.NewBoltDB(path)
{
NSURL* URL= [NSURL fileURLWithPath: filePathString];
assert([[NSFileManager defaultManager] fileExistsAtPath: [URL path]]);
NSError *error = nil;
BOOL success = [URL setResourceValue: [NSNumber numberWithBool: YES]
forKey: NSURLIsExcludedFromBackupKey error: &error];
@ -835,6 +837,14 @@ Below is a list of public, open source projects that use Bolt:
backed by boltdb.
* [buckets](https://github.com/joyrexus/buckets) - a bolt wrapper streamlining
simple tx and key scans.
* [mbuckets](https://github.com/abhigupta912/mbuckets) - A Bolt wrapper that allows easy operations on multi level (nested) buckets.
* [Request Baskets](https://github.com/darklynx/request-baskets) - A web service to collect arbitrary HTTP requests and inspect them via REST API or simple web UI, similar to [RequestBin](http://requestb.in/) service
* [Go Report Card](https://goreportcard.com/) - Go code quality report cards as a (free and open source) service.
* [Boltdb Boilerplate](https://github.com/bobintornado/boltdb-boilerplate) - Boilerplate wrapper around bolt aiming to make simple calls one-liners.
* [lru](https://github.com/crowdriff/lru) - Easy to use Bolt-backed Least-Recently-Used (LRU) read-through cache with chainable remote stores.
* [Storm](https://github.com/asdine/storm) - A simple ORM around BoltDB.
* [GoWebApp](https://github.com/josephspurrier/gowebapp) - A basic MVC web application in Go using BoltDB.
* [SimpleBolt](https://github.com/xyproto/simplebolt) - A simple way to use BoltDB. Deals mainly with strings.
* [Algernon](https://github.com/xyproto/algernon) - A HTTP/2 web server with built-in support for Lua. Uses BoltDB as the default database backend.
If you are using Bolt in a project please send a pull request to add it to the list.
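(Illustrative aside, not part of this commit.) The README note above about RFC3339 vs RFC3339Nano is easy to verify: RFC3339 timestamps have a fixed width and therefore sort lexicographically, while RFC3339Nano trims trailing zeros from the fractional seconds, so byte order can disagree with time order. A minimal sketch using only the standard library:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	t := time.Date(2016, 7, 22, 13, 54, 40, 500, time.UTC)

	// Fixed width: safe to use as a sortable Bolt key.
	fmt.Println(t.Format(time.RFC3339)) // 2016-07-22T13:54:40Z

	// Variable width: trailing zeros are trimmed, so lexicographic order
	// no longer matches chronological order.
	fmt.Println(t.Format(time.RFC3339Nano)) // 2016-07-22T13:54:40.0000005Z
}
```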

vendor/github.com/boltdb/bolt/appveyor.yml generated vendored Normal file

@ -0,0 +1,18 @@
version: "{build}"
os: Windows Server 2012 R2
clone_folder: c:\gopath\src\github.com\boltdb\bolt
environment:
GOPATH: c:\gopath
install:
- echo %PATH%
- echo %GOPATH%
- go version
- go env
- go get -v -t ./...
build_script:
- go test -v ./...

vendor/github.com/boltdb/bolt/bolt_ppc.go generated vendored Normal file

@ -0,0 +1,9 @@
// +build ppc
package bolt
// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0x7FFFFFFF // 2GB
// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0xFFFFFFF

vendor/github.com/boltdb/bolt/bolt_ppc64.go generated vendored Normal file

@ -0,0 +1,9 @@
// +build ppc64
package bolt
// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0xFFFFFFFFFFFF // 256TB
// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF

vendor/github.com/boltdb/bolt/bolt_unix.go generated vendored

@ -11,7 +11,7 @@ import (
)
// flock acquires an advisory lock on a file descriptor.
func flock(f *os.File, exclusive bool, timeout time.Duration) error {
func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error {
var t time.Time
for {
// If we're beyond our timeout then return an error.
@ -27,7 +27,7 @@ func flock(f *os.File, exclusive bool, timeout time.Duration) error {
}
// Otherwise attempt to obtain an exclusive lock.
err := syscall.Flock(int(f.Fd()), flag|syscall.LOCK_NB)
err := syscall.Flock(int(db.file.Fd()), flag|syscall.LOCK_NB)
if err == nil {
return nil
} else if err != syscall.EWOULDBLOCK {
@ -40,8 +40,8 @@ func flock(f *os.File, exclusive bool, timeout time.Duration) error {
}
// funlock releases an advisory lock on a file descriptor.
func funlock(f *os.File) error {
return syscall.Flock(int(f.Fd()), syscall.LOCK_UN)
func funlock(db *DB) error {
return syscall.Flock(int(db.file.Fd()), syscall.LOCK_UN)
}
// mmap memory maps a DB's data file.

vendor/github.com/boltdb/bolt/bolt_unix_solaris.go generated vendored

@ -11,7 +11,7 @@ import (
)
// flock acquires an advisory lock on a file descriptor.
func flock(f *os.File, exclusive bool, timeout time.Duration) error {
func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error {
var t time.Time
for {
// If we're beyond our timeout then return an error.
@ -32,7 +32,7 @@ func flock(f *os.File, exclusive bool, timeout time.Duration) error {
} else {
lock.Type = syscall.F_RDLCK
}
err := syscall.FcntlFlock(f.Fd(), syscall.F_SETLK, &lock)
err := syscall.FcntlFlock(db.file.Fd(), syscall.F_SETLK, &lock)
if err == nil {
return nil
} else if err != syscall.EAGAIN {
@ -45,13 +45,13 @@ func flock(f *os.File, exclusive bool, timeout time.Duration) error {
}
// funlock releases an advisory lock on a file descriptor.
func funlock(f *os.File) error {
func funlock(db *DB) error {
var lock syscall.Flock_t
lock.Start = 0
lock.Len = 0
lock.Type = syscall.F_UNLCK
lock.Whence = 0
return syscall.FcntlFlock(uintptr(f.Fd()), syscall.F_SETLK, &lock)
return syscall.FcntlFlock(uintptr(db.file.Fd()), syscall.F_SETLK, &lock)
}
// mmap memory maps a DB's data file.

vendor/github.com/boltdb/bolt/bolt_windows.go generated vendored

@ -16,6 +16,8 @@ var (
)
const (
lockExt = ".lock"
// see https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx
flagLockExclusive = 2
flagLockFailImmediately = 1
@ -46,7 +48,16 @@ func fdatasync(db *DB) error {
}
// flock acquires an advisory lock on a file descriptor.
func flock(f *os.File, exclusive bool, timeout time.Duration) error {
func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error {
// Create a separate lock file on windows because a process
// cannot share an exclusive lock on the same file. This is
// needed during Tx.WriteTo().
f, err := os.OpenFile(db.path+lockExt, os.O_CREATE, mode)
if err != nil {
return err
}
db.lockfile = f
var t time.Time
for {
// If we're beyond our timeout then return an error.
@ -62,7 +73,7 @@ func flock(f *os.File, exclusive bool, timeout time.Duration) error {
flag |= flagLockExclusive
}
err := lockFileEx(syscall.Handle(f.Fd()), flag, 0, 1, 0, &syscall.Overlapped{})
err := lockFileEx(syscall.Handle(db.lockfile.Fd()), flag, 0, 1, 0, &syscall.Overlapped{})
if err == nil {
return nil
} else if err != errLockViolation {
@ -75,8 +86,11 @@ func flock(f *os.File, exclusive bool, timeout time.Duration) error {
}
// funlock releases an advisory lock on a file descriptor.
func funlock(f *os.File) error {
return unlockFileEx(syscall.Handle(f.Fd()), 0, 1, 0, &syscall.Overlapped{})
func funlock(db *DB) error {
err := unlockFileEx(syscall.Handle(db.lockfile.Fd()), 0, 1, 0, &syscall.Overlapped{})
db.lockfile.Close()
os.Remove(db.path+lockExt)
return err
}
// mmap memory maps a DB's data file.
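(Illustrative sketch, not part of this commit.) The flock/funlock signature change above, passing the *DB and file mode instead of an *os.File, is exercised whenever a caller opens a database with a timeout; on Windows the lock is now taken on a separate path+".lock" file so Tx.WriteTo can share the data file. Assuming the vendored import path, a caller looks like:

```go
package main

import (
	"log"
	"time"

	"github.com/boltdb/bolt"
)

func main() {
	// A non-zero Timeout makes flock retry until the deadline, which is
	// the code path modified in the hunks above.
	db, err := bolt.Open("my.db", 0600, &bolt.Options{Timeout: time.Second})
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}
```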

vendor/github.com/boltdb/bolt/db.go generated vendored

@ -36,6 +36,9 @@ const (
DefaultAllocSize = 16 * 1024 * 1024
)
// default page size for db is set to the OS page size.
var defaultPageSize = os.Getpagesize()
// DB represents a collection of buckets persisted to a file on disk.
// All data access is performed through transactions which can be obtained through the DB.
// All the functions on DB will return a ErrDatabaseNotOpen if accessed before Open() is called.
@ -93,7 +96,8 @@ type DB struct {
path string
file *os.File
dataref []byte // mmap'ed readonly, write throws SEGV
lockfile *os.File // windows only
dataref []byte // mmap'ed readonly, write throws SEGV
data *[maxMapSize]byte
datasz int
filesz int // current on disk file size
@ -106,6 +110,8 @@ type DB struct {
freelist *freelist
stats Stats
pagePool sync.Pool
batchMu sync.Mutex
batch *batch
@ -177,7 +183,7 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) {
// if !options.ReadOnly.
// The database file is locked using the shared lock (more than one process may
// hold a lock at the same time) otherwise (options.ReadOnly is set).
if err := flock(db.file, !db.readOnly, options.Timeout); err != nil {
if err := flock(db, mode, !db.readOnly, options.Timeout); err != nil {
_ = db.close()
return nil, err
}
@ -199,12 +205,27 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) {
if _, err := db.file.ReadAt(buf[:], 0); err == nil {
m := db.pageInBuffer(buf[:], 0).meta()
if err := m.validate(); err != nil {
return nil, err
// If we can't read the page size, we can assume it's the same
// as the OS -- since that's how the page size was chosen in the
// first place.
//
// If the first page is invalid and this OS uses a different
// page size than what the database was created with then we
// are out of luck and cannot access the database.
db.pageSize = os.Getpagesize()
} else {
db.pageSize = int(m.pageSize)
}
db.pageSize = int(m.pageSize)
}
}
// Initialize page pool.
db.pagePool = sync.Pool{
New: func() interface{} {
return make([]byte, db.pageSize)
},
}
// Memory map the data file.
if err := db.mmap(options.InitialMmapSize); err != nil {
_ = db.close()
@ -261,12 +282,13 @@ func (db *DB) mmap(minsz int) error {
db.meta0 = db.page(0).meta()
db.meta1 = db.page(1).meta()
// Validate the meta pages.
if err := db.meta0.validate(); err != nil {
return err
}
if err := db.meta1.validate(); err != nil {
return err
// Validate the meta pages. We only return an error if both meta pages fail
// validation, since meta0 failing validation means that it wasn't saved
// properly -- but we can recover using meta1. And vice-versa.
err0 := db.meta0.validate()
err1 := db.meta1.validate()
if err0 != nil && err1 != nil {
return err0
}
return nil
@ -338,6 +360,7 @@ func (db *DB) init() error {
m.root = bucket{root: 3}
m.pgid = 4
m.txid = txid(i)
m.checksum = m.sum64()
}
// Write an empty freelist at page 3.
@ -379,10 +402,13 @@ func (db *DB) Close() error {
}
func (db *DB) close() error {
if !db.opened {
return nil
}
db.opened = false
db.freelist = nil
db.path = ""
// Clear ops.
db.ops.writeAt = nil
@ -397,7 +423,7 @@ func (db *DB) close() error {
// No need to unlock read-only file.
if !db.readOnly {
// Unlock the file.
if err := funlock(db.file); err != nil {
if err := funlock(db); err != nil {
log.Printf("bolt.Close(): funlock error: %s", err)
}
}
@ -409,6 +435,7 @@ func (db *DB) close() error {
db.file = nil
}
db.path = ""
return nil
}
@ -773,16 +800,37 @@ func (db *DB) pageInBuffer(b []byte, id pgid) *page {
// meta retrieves the current meta page reference.
func (db *DB) meta() *meta {
if db.meta0.txid > db.meta1.txid {
return db.meta0
// We have to return the meta with the highest txid which doesn't fail
// validation. Otherwise, we can cause errors when in fact the database is
// in a consistent state. metaA is the one with the higher txid.
metaA := db.meta0
metaB := db.meta1
if db.meta1.txid > db.meta0.txid {
metaA = db.meta1
metaB = db.meta0
}
return db.meta1
// Use higher meta page if valid. Otherwise fallback to previous, if valid.
if err := metaA.validate(); err == nil {
return metaA
} else if err := metaB.validate(); err == nil {
return metaB
}
// This should never be reached, because both meta1 and meta0 were validated
// on mmap() and we do fsync() on every write.
panic("bolt.DB.meta(): invalid meta pages")
}
// allocate returns a contiguous block of memory starting at a given page.
func (db *DB) allocate(count int) (*page, error) {
// Allocate a temporary buffer for the page.
buf := make([]byte, count*db.pageSize)
var buf []byte
if count == 1 {
buf = db.pagePool.Get().([]byte)
} else {
buf = make([]byte, count*db.pageSize)
}
p := (*page)(unsafe.Pointer(&buf[0]))
p.overflow = uint32(count - 1)
@ -824,8 +872,10 @@ func (db *DB) grow(sz int) error {
// Truncate and fsync to ensure file size metadata is flushed.
// https://github.com/boltdb/bolt/issues/284
if !db.NoGrowSync && !db.readOnly {
if err := db.file.Truncate(int64(sz)); err != nil {
return fmt.Errorf("file resize error: %s", err)
if runtime.GOOS != "windows" {
if err := db.file.Truncate(int64(sz)); err != nil {
return fmt.Errorf("file resize error: %s", err)
}
}
if err := db.file.Sync(); err != nil {
return fmt.Errorf("file sync error: %s", err)
@ -930,12 +980,12 @@ type meta struct {
// validate checks the marker bytes and version of the meta page to ensure it matches this binary.
func (m *meta) validate() error {
if m.checksum != 0 && m.checksum != m.sum64() {
return ErrChecksum
} else if m.magic != magic {
if m.magic != magic {
return ErrInvalid
} else if m.version != version {
return ErrVersionMismatch
} else if m.checksum != 0 && m.checksum != m.sum64() {
return ErrChecksum
}
return nil
}
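(Standalone sketch with made-up names, not code from this commit.) The db.go hunks above introduce a pagePool so that single-page buffers are recycled rather than reallocated on every write transaction; multi-page allocations still use make(), and tx.write() zeroes buffers before returning them to the pool. The pattern in isolation:

```go
package main

import (
	"fmt"
	"os"
	"sync"
)

func main() {
	pageSize := os.Getpagesize()

	// Mirrors db.pagePool: New produces one page-sized buffer on demand.
	pool := sync.Pool{
		New: func() interface{} { return make([]byte, pageSize) },
	}

	allocate := func(count int) []byte {
		if count == 1 {
			return pool.Get().([]byte) // single pages come from the pool
		}
		return make([]byte, count*pageSize) // larger blocks bypass it
	}

	buf := allocate(1)
	// ... write the page, flush it to disk, then zero it before reuse,
	// as tx.write() now does.
	for i := range buf {
		buf[i] = 0
	}
	pool.Put(buf)

	fmt.Println("recycled one", pageSize, "byte page")
}
```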

vendor/github.com/boltdb/bolt/errors.go generated vendored

@ -12,7 +12,8 @@ var (
// already open.
ErrDatabaseOpen = errors.New("database already open")
// ErrInvalid is returned when a data file is not a Bolt-formatted database.
// ErrInvalid is returned when both meta pages on a database are invalid.
// This typically occurs when a file is not a bolt database.
ErrInvalid = errors.New("invalid database")
// ErrVersionMismatch is returned when the data file was created with a

vendor/github.com/boltdb/bolt/node.go generated vendored

@ -463,43 +463,6 @@ func (n *node) rebalance() {
target = n.prevSibling()
}
// If target node has extra nodes then just move one over.
if target.numChildren() > target.minKeys() {
if useNextSibling {
// Reparent and move node.
if child, ok := n.bucket.nodes[target.inodes[0].pgid]; ok {
child.parent.removeChild(child)
child.parent = n
child.parent.children = append(child.parent.children, child)
}
n.inodes = append(n.inodes, target.inodes[0])
target.inodes = target.inodes[1:]
// Update target key on parent.
target.parent.put(target.key, target.inodes[0].key, nil, target.pgid, 0)
target.key = target.inodes[0].key
_assert(len(target.key) > 0, "rebalance(1): zero-length node key")
} else {
// Reparent and move node.
if child, ok := n.bucket.nodes[target.inodes[len(target.inodes)-1].pgid]; ok {
child.parent.removeChild(child)
child.parent = n
child.parent.children = append(child.parent.children, child)
}
n.inodes = append(n.inodes, inode{})
copy(n.inodes[1:], n.inodes)
n.inodes[0] = target.inodes[len(target.inodes)-1]
target.inodes = target.inodes[:len(target.inodes)-1]
}
// Update parent key for node.
n.parent.put(n.key, n.inodes[0].key, nil, n.pgid, 0)
n.key = n.inodes[0].key
_assert(len(n.key) > 0, "rebalance(2): zero-length node key")
return
}
// If both this node and the target node are too small then merge them.
if useNextSibling {
// Reparent all child nodes being moved.

vendor/github.com/boltdb/bolt/page.go generated vendored

@ -111,13 +111,13 @@ type leafPageElement struct {
// key returns a byte slice of the node key.
func (n *leafPageElement) key() []byte {
buf := (*[maxAllocSize]byte)(unsafe.Pointer(n))
return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize]
return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize:n.ksize]
}
// value returns a byte slice of the node value.
func (n *leafPageElement) value() []byte {
buf := (*[maxAllocSize]byte)(unsafe.Pointer(n))
return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos+n.ksize]))[:n.vsize]
return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos+n.ksize]))[:n.vsize:n.vsize]
}
// PageInfo represents human readable information about a page.
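(Illustrative example, not from the commit.) The page.go change above switches key() and value() to full slice expressions. The third index caps the slice's capacity, so a later append on the returned slice must reallocate instead of growing into, and corrupting, the adjacent bytes of the mmap'd page:

```go
package main

import "fmt"

func main() {
	page := []byte("keyvalue")

	loose := page[0:3]    // len 3, cap 8: append can scribble over "value"
	capped := page[0:3:3] // len 3, cap 3: append must copy to a new array

	loose = append(loose, 'X')
	fmt.Println(string(page)) // "keyXalue": the shared backing array was mutated

	capped = append(capped, 'Y')
	fmt.Println(string(page))   // still "keyXalue": unchanged by the capped append
	fmt.Println(string(capped)) // "keyY"
}
```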

vendor/github.com/boltdb/bolt/tx.go generated vendored

@ -5,6 +5,7 @@ import (
"io"
"os"
"sort"
"strings"
"time"
"unsafe"
)
@ -202,8 +203,17 @@ func (tx *Tx) Commit() error {
// If strict mode is enabled then perform a consistency check.
// Only the first consistency error is reported in the panic.
if tx.db.StrictMode {
if err, ok := <-tx.Check(); ok {
panic("check fail: " + err.Error())
ch := tx.Check()
var errs []string
for {
err, ok := <-ch
if !ok {
break
}
errs = append(errs, err.Error())
}
if len(errs) > 0 {
panic("check fail: " + strings.Join(errs, "\n"))
}
}
@ -297,12 +307,34 @@ func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) {
}
defer func() { _ = f.Close() }()
// Copy the meta pages.
tx.db.metalock.Lock()
n, err = io.CopyN(w, f, int64(tx.db.pageSize*2))
tx.db.metalock.Unlock()
// Generate a meta page. We use the same page data for both meta pages.
buf := make([]byte, tx.db.pageSize)
page := (*page)(unsafe.Pointer(&buf[0]))
page.flags = metaPageFlag
*page.meta() = *tx.meta
// Write meta 0.
page.id = 0
page.meta().checksum = page.meta().sum64()
nn, err := w.Write(buf)
n += int64(nn)
if err != nil {
return n, fmt.Errorf("meta copy: %s", err)
return n, fmt.Errorf("meta 0 copy: %s", err)
}
// Write meta 1 with a lower transaction id.
page.id = 1
page.meta().txid -= 1
page.meta().checksum = page.meta().sum64()
nn, err = w.Write(buf)
n += int64(nn)
if err != nil {
return n, fmt.Errorf("meta 1 copy: %s", err)
}
// Move past the meta pages in the file.
if _, err := f.Seek(int64(tx.db.pageSize*2), os.SEEK_SET); err != nil {
return n, fmt.Errorf("seek: %s", err)
}
// Copy data pages.
@ -441,6 +473,8 @@ func (tx *Tx) write() error {
for _, p := range tx.pages {
pages = append(pages, p)
}
// Clear out page cache early.
tx.pages = make(map[pgid]*page)
sort.Sort(pages)
// Write pages to disk in order.
@ -485,8 +519,22 @@ func (tx *Tx) write() error {
}
}
// Clear out page cache.
tx.pages = make(map[pgid]*page)
// Put small pages back to page pool.
for _, p := range pages {
// Ignore page sizes over 1 page.
// These are allocated using make() instead of the page pool.
if int(p.overflow) != 0 {
continue
}
buf := (*[maxAllocSize]byte)(unsafe.Pointer(p))[:tx.db.pageSize]
// See https://go.googlesource.com/go/+/f03c9202c43e0abb130669852082117ca50aa9b1
for i := range buf {
buf[i] = 0
}
tx.db.pagePool.Put(buf)
}
return nil
}
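(Illustrative sketch, not part of this commit.) Tx.WriteTo above now synthesizes both meta pages from the transaction's own meta (writing meta 1 with a lower txid) instead of copying them from the file under metalock, then seeks past them before streaming the data pages. Assuming the vendored import path, the usual hot-backup call site is unchanged:

```go
package main

import (
	"log"
	"os"

	"github.com/boltdb/bolt"
)

func main() {
	db, err := bolt.Open("my.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Stream a consistent copy of the database to backup.db while other
	// read transactions keep running.
	err = db.View(func(tx *bolt.Tx) error {
		f, cerr := os.Create("backup.db")
		if cerr != nil {
			return cerr
		}
		defer f.Close()
		_, cerr = tx.WriteTo(f)
		return cerr
	})
	if err != nil {
		log.Fatal(err)
	}
}
```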

vendor/github.com/coreos/etcd/alarm/alarms.go generated vendored

@ -1,4 +1,4 @@
// Copyright 2016 CoreOS, Inc.
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -19,8 +19,8 @@ import (
"sync"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"github.com/coreos/etcd/mvcc/backend"
"github.com/coreos/etcd/pkg/types"
"github.com/coreos/etcd/storage/backend"
"github.com/coreos/pkg/capnslog"
)

vendor/github.com/coreos/etcd/auth/authpb/auth.pb.go generated vendored

@ -10,6 +10,7 @@
It has these top-level messages:
User
Permission
Role
*/
package authpb
@ -17,7 +18,7 @@ package authpb
import (
"fmt"
proto "github.com/gogo/protobuf/proto"
proto "github.com/golang/protobuf/proto"
math "math"
)
@ -29,29 +30,74 @@ var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// User is a single entry in the bucket authUsers
type User struct {
Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
Password []byte `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"`
Tombstone int64 `protobuf:"varint,3,opt,name=tombstone,proto3" json:"tombstone,omitempty"`
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
const _ = proto.ProtoPackageIsVersion1
type Permission_Type int32
const (
READ Permission_Type = 0
WRITE Permission_Type = 1
READWRITE Permission_Type = 2
)
var Permission_Type_name = map[int32]string{
0: "READ",
1: "WRITE",
2: "READWRITE",
}
var Permission_Type_value = map[string]int32{
"READ": 0,
"WRITE": 1,
"READWRITE": 2,
}
func (m *User) Reset() { *m = User{} }
func (m *User) String() string { return proto.CompactTextString(m) }
func (*User) ProtoMessage() {}
func (x Permission_Type) String() string {
return proto.EnumName(Permission_Type_name, int32(x))
}
func (Permission_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptorAuth, []int{1, 0} }
// User is a single entry in the bucket authUsers
type User struct {
Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
Password []byte `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"`
Roles []string `protobuf:"bytes,3,rep,name=roles" json:"roles,omitempty"`
}
func (m *User) Reset() { *m = User{} }
func (m *User) String() string { return proto.CompactTextString(m) }
func (*User) ProtoMessage() {}
func (*User) Descriptor() ([]byte, []int) { return fileDescriptorAuth, []int{0} }
// Permission is a single entity
type Permission struct {
PermType Permission_Type `protobuf:"varint,1,opt,name=permType,proto3,enum=authpb.Permission_Type" json:"permType,omitempty"`
Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
RangeEnd []byte `protobuf:"bytes,3,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"`
}
func (m *Permission) Reset() { *m = Permission{} }
func (m *Permission) String() string { return proto.CompactTextString(m) }
func (*Permission) ProtoMessage() {}
func (*Permission) Descriptor() ([]byte, []int) { return fileDescriptorAuth, []int{1} }
// Role is a single entry in the bucket authRoles
type Role struct {
Name []byte `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
KeyPermission []*Permission `protobuf:"bytes,2,rep,name=keyPermission" json:"keyPermission,omitempty"`
}
func (m *Role) Reset() { *m = Role{} }
func (m *Role) String() string { return proto.CompactTextString(m) }
func (*Role) ProtoMessage() {}
func (m *Role) Reset() { *m = Role{} }
func (m *Role) String() string { return proto.CompactTextString(m) }
func (*Role) ProtoMessage() {}
func (*Role) Descriptor() ([]byte, []int) { return fileDescriptorAuth, []int{2} }
func init() {
proto.RegisterType((*User)(nil), "authpb.User")
proto.RegisterType((*Permission)(nil), "authpb.Permission")
proto.RegisterType((*Role)(nil), "authpb.Role")
proto.RegisterEnum("authpb.Permission_Type", Permission_Type_name, Permission_Type_value)
}
func (m *User) Marshal() (data []byte, err error) {
size := m.Size()
@ -68,26 +114,67 @@ func (m *User) MarshalTo(data []byte) (int, error) {
_ = i
var l int
_ = l
if m.Name != nil {
if len(m.Name) > 0 {
data[i] = 0xa
i++
i = encodeVarintAuth(data, i, uint64(len(m.Name)))
i += copy(data[i:], m.Name)
}
}
if m.Password != nil {
if len(m.Password) > 0 {
data[i] = 0x12
i++
i = encodeVarintAuth(data, i, uint64(len(m.Password)))
i += copy(data[i:], m.Password)
}
}
if m.Tombstone != 0 {
data[i] = 0x18
if len(m.Name) > 0 {
data[i] = 0xa
i++
i = encodeVarintAuth(data, i, uint64(m.Tombstone))
i = encodeVarintAuth(data, i, uint64(len(m.Name)))
i += copy(data[i:], m.Name)
}
if len(m.Password) > 0 {
data[i] = 0x12
i++
i = encodeVarintAuth(data, i, uint64(len(m.Password)))
i += copy(data[i:], m.Password)
}
if len(m.Roles) > 0 {
for _, s := range m.Roles {
data[i] = 0x1a
i++
l = len(s)
for l >= 1<<7 {
data[i] = uint8(uint64(l)&0x7f | 0x80)
l >>= 7
i++
}
data[i] = uint8(l)
i++
i += copy(data[i:], s)
}
}
return i, nil
}
func (m *Permission) Marshal() (data []byte, err error) {
size := m.Size()
data = make([]byte, size)
n, err := m.MarshalTo(data)
if err != nil {
return nil, err
}
return data[:n], nil
}
func (m *Permission) MarshalTo(data []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.PermType != 0 {
data[i] = 0x8
i++
i = encodeVarintAuth(data, i, uint64(m.PermType))
}
if len(m.Key) > 0 {
data[i] = 0x12
i++
i = encodeVarintAuth(data, i, uint64(len(m.Key)))
i += copy(data[i:], m.Key)
}
if len(m.RangeEnd) > 0 {
data[i] = 0x1a
i++
i = encodeVarintAuth(data, i, uint64(len(m.RangeEnd)))
i += copy(data[i:], m.RangeEnd)
}
return i, nil
}
@ -107,12 +194,22 @@ func (m *Role) MarshalTo(data []byte) (int, error) {
_ = i
var l int
_ = l
if m.Name != nil {
if len(m.Name) > 0 {
if len(m.Name) > 0 {
data[i] = 0xa
i++
i = encodeVarintAuth(data, i, uint64(len(m.Name)))
i += copy(data[i:], m.Name)
}
if len(m.KeyPermission) > 0 {
for _, msg := range m.KeyPermission {
data[i] = 0x12
i++
i = encodeVarintAuth(data, i, uint64(len(m.Name)))
i += copy(data[i:], m.Name)
i = encodeVarintAuth(data, i, uint64(msg.Size()))
n, err := msg.MarshalTo(data[i:])
if err != nil {
return 0, err
}
i += n
}
}
return i, nil
@ -148,20 +245,36 @@ func encodeVarintAuth(data []byte, offset int, v uint64) int {
func (m *User) Size() (n int) {
var l int
_ = l
if m.Name != nil {
l = len(m.Name)
if l > 0 {
l = len(m.Name)
if l > 0 {
n += 1 + l + sovAuth(uint64(l))
}
l = len(m.Password)
if l > 0 {
n += 1 + l + sovAuth(uint64(l))
}
if len(m.Roles) > 0 {
for _, s := range m.Roles {
l = len(s)
n += 1 + l + sovAuth(uint64(l))
}
}
if m.Password != nil {
l = len(m.Password)
if l > 0 {
n += 1 + l + sovAuth(uint64(l))
}
return n
}
func (m *Permission) Size() (n int) {
var l int
_ = l
if m.PermType != 0 {
n += 1 + sovAuth(uint64(m.PermType))
}
if m.Tombstone != 0 {
n += 1 + sovAuth(uint64(m.Tombstone))
l = len(m.Key)
if l > 0 {
n += 1 + l + sovAuth(uint64(l))
}
l = len(m.RangeEnd)
if l > 0 {
n += 1 + l + sovAuth(uint64(l))
}
return n
}
@ -169,9 +282,13 @@ func (m *User) Size() (n int) {
func (m *Role) Size() (n int) {
var l int
_ = l
if m.Name != nil {
l = len(m.Name)
if l > 0 {
l = len(m.Name)
if l > 0 {
n += 1 + l + sovAuth(uint64(l))
}
if len(m.KeyPermission) > 0 {
for _, e := range m.KeyPermission {
l = e.Size()
n += 1 + l + sovAuth(uint64(l))
}
}
@ -283,10 +400,10 @@ func (m *User) Unmarshal(data []byte) error {
}
iNdEx = postIndex
case 3:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Tombstone", wireType)
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Roles", wireType)
}
m.Tombstone = 0
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowAuth
@ -296,11 +413,152 @@ func (m *User) Unmarshal(data []byte) error {
}
b := data[iNdEx]
iNdEx++
m.Tombstone |= (int64(b) & 0x7F) << shift
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthAuth
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Roles = append(m.Roles, string(data[iNdEx:postIndex]))
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipAuth(data[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthAuth
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *Permission) Unmarshal(data []byte) error {
l := len(data)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowAuth
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Permission: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Permission: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field PermType", wireType)
}
m.PermType = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowAuth
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
m.PermType |= (Permission_Type(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowAuth
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
byteLen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthAuth
}
postIndex := iNdEx + byteLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Key = append(m.Key[:0], data[iNdEx:postIndex]...)
if m.Key == nil {
m.Key = []byte{}
}
iNdEx = postIndex
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowAuth
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
byteLen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthAuth
}
postIndex := iNdEx + byteLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.RangeEnd = append(m.RangeEnd[:0], data[iNdEx:postIndex]...)
if m.RangeEnd == nil {
m.RangeEnd = []byte{}
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipAuth(data[iNdEx:])
@ -351,7 +609,7 @@ func (m *Role) Unmarshal(data []byte) error {
return fmt.Errorf("proto: Role: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 2:
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
}
@ -382,6 +640,37 @@ func (m *Role) Unmarshal(data []byte) error {
m.Name = []byte{}
}
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field KeyPermission", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowAuth
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthAuth
}
postIndex := iNdEx + msglen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.KeyPermission = append(m.KeyPermission, &Permission{})
if err := m.KeyPermission[len(m.KeyPermission)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipAuth(data[iNdEx:])
@ -507,3 +796,25 @@ var (
ErrInvalidLengthAuth = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowAuth = fmt.Errorf("proto: integer overflow")
)
var fileDescriptorAuth = []byte{
// 288 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x6c, 0x90, 0xc1, 0x4a, 0xc3, 0x30,
0x1c, 0xc6, 0x9b, 0xb6, 0x1b, 0xed, 0x5f, 0x27, 0x25, 0x0c, 0x0c, 0x13, 0x42, 0xe9, 0xa9, 0x78,
0xa8, 0xb0, 0x5d, 0xbc, 0x2a, 0xf6, 0x20, 0x78, 0x90, 0x50, 0xf1, 0x28, 0x1d, 0x0d, 0x75, 0x6c,
0x6d, 0x4a, 0x32, 0x91, 0xbe, 0x89, 0x07, 0x1f, 0x68, 0xc7, 0x3d, 0x82, 0xab, 0x2f, 0x22, 0x4d,
0x64, 0x43, 0xdc, 0xed, 0xfb, 0xbe, 0xff, 0x97, 0xe4, 0x97, 0x3f, 0x40, 0xfe, 0xb6, 0x7e, 0x4d,
0x1a, 0x29, 0xd6, 0x02, 0x0f, 0x7b, 0xdd, 0xcc, 0x27, 0xe3, 0x52, 0x94, 0x42, 0x47, 0x57, 0xbd,
0x32, 0xd3, 0xe8, 0x01, 0xdc, 0x27, 0xc5, 0x25, 0xc6, 0xe0, 0xd6, 0x79, 0xc5, 0x09, 0x0a, 0x51,
0x7c, 0xca, 0xb4, 0xc6, 0x13, 0xf0, 0x9a, 0x5c, 0xa9, 0x77, 0x21, 0x0b, 0x62, 0xeb, 0x7c, 0xef,
0xf1, 0x18, 0x06, 0x52, 0xac, 0xb8, 0x22, 0x4e, 0xe8, 0xc4, 0x3e, 0x33, 0x26, 0xfa, 0x44, 0x00,
0x8f, 0x5c, 0x56, 0x0b, 0xa5, 0x16, 0xa2, 0xc6, 0x33, 0xf0, 0x1a, 0x2e, 0xab, 0xac, 0x6d, 0xcc,
0xc5, 0x67, 0xd3, 0xf3, 0xc4, 0xd0, 0x24, 0x87, 0x56, 0xd2, 0x8f, 0xd9, 0xbe, 0x88, 0x03, 0x70,
0x96, 0xbc, 0xfd, 0x7d, 0xb0, 0x97, 0xf8, 0x02, 0x7c, 0x99, 0xd7, 0x25, 0x7f, 0xe1, 0x75, 0x41,
0x1c, 0x03, 0xa2, 0x83, 0xb4, 0x2e, 0xa2, 0x4b, 0x70, 0xf5, 0x31, 0x0f, 0x5c, 0x96, 0xde, 0xdc,
0x05, 0x16, 0xf6, 0x61, 0xf0, 0xcc, 0xee, 0xb3, 0x34, 0x40, 0x78, 0x04, 0x7e, 0x1f, 0x1a, 0x6b,
0x47, 0x19, 0xb8, 0x4c, 0xac, 0xf8, 0xd1, 0xcf, 0x5e, 0xc3, 0x68, 0xc9, 0xdb, 0x03, 0x16, 0xb1,
0x43, 0x27, 0x3e, 0x99, 0xe2, 0xff, 0xc0, 0xec, 0x6f, 0xf1, 0x96, 0x6c, 0x76, 0xd4, 0xda, 0xee,
0xa8, 0xb5, 0xe9, 0x28, 0xda, 0x76, 0x14, 0x7d, 0x75, 0x14, 0x7d, 0x7c, 0x53, 0x6b, 0x3e, 0xd4,
0x3b, 0x9e, 0xfd, 0x04, 0x00, 0x00, 0xff, 0xff, 0xcc, 0x76, 0x8d, 0x4f, 0x8f, 0x01, 0x00, 0x00,
}

vendor/github.com/coreos/etcd/auth/authpb/auth.proto generated vendored

@ -13,10 +13,25 @@ option (gogoproto.goproto_enum_prefix_all) = false;
message User {
bytes name = 1;
bytes password = 2;
int64 tombstone = 3;
repeated string roles = 3;
}
// Permission is a single entity
message Permission {
enum Type {
READ = 0;
WRITE = 1;
READWRITE = 2;
}
Type permType = 1;
bytes key = 2;
bytes range_end = 3;
}
// Role is a single entry in the bucket authRoles
message Role {
bytes name = 2;
bytes name = 1;
repeated Permission keyPermission = 2;
}
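For orientation, a minimal, hypothetical sketch of how the reshaped messages are used from Go; the role name and key range are invented for the example, and it relies only on the fields visible in the proto above.

```go
package main

import (
	"fmt"

	"github.com/coreos/etcd/auth/authpb"
)

func main() {
	// A permission now carries a type plus an optional range end, so one
	// entry can cover either a single key or a key range.
	perm := &authpb.Permission{
		PermType: authpb.READWRITE,
		Key:      []byte("foo"),
		RangeEnd: []byte("fop"), // illustrative range end for the "foo" prefix
	}
	// Role.name moved to field 1, and roles now hold repeated permissions.
	role := &authpb.Role{
		Name:          []byte("example-role"),
		KeyPermission: []*authpb.Permission{perm},
	}
	b, err := role.Marshal()
	if err != nil {
		panic(err)
	}
	fmt.Printf("marshaled role is %d bytes\n", len(b))
}
```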

16
vendor/github.com/coreos/etcd/auth/doc.go generated vendored Normal file
View File

@ -0,0 +1,16 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package auth provides client role authentication for accessing keys in etcd.
package auth

219
vendor/github.com/coreos/etcd/auth/range_perm_cache.go generated vendored Normal file
View File

@ -0,0 +1,219 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package auth
import (
"bytes"
"sort"
"github.com/coreos/etcd/auth/authpb"
"github.com/coreos/etcd/mvcc/backend"
)
// isSubset returns true if a is a subset of b
func isSubset(a, b *rangePerm) bool {
switch {
case len(a.end) == 0 && len(b.end) == 0:
// a, b are both keys
return bytes.Equal(a.begin, b.begin)
case len(b.end) == 0:
// b is a key, a is a range
return false
case len(a.end) == 0:
return 0 <= bytes.Compare(a.begin, b.begin) && bytes.Compare(a.begin, b.end) <= 0
default:
return 0 <= bytes.Compare(a.begin, b.begin) && bytes.Compare(a.end, b.end) <= 0
}
}
func isRangeEqual(a, b *rangePerm) bool {
return bytes.Equal(a.begin, b.begin) && bytes.Equal(a.end, b.end)
}
// removeSubsetRangePerms removes any rangePerms that are subsets of other rangePerms.
// If there are equal ranges, removeSubsetRangePerms only keeps one of them.
func removeSubsetRangePerms(perms []*rangePerm) []*rangePerm {
// TODO(mitake): currently it is O(n^2), we need a better algorithm
newp := make([]*rangePerm, 0)
for i := range perms {
skip := false
for j := range perms {
if i == j {
continue
}
if isRangeEqual(perms[i], perms[j]) {
// if ranges are equal, we only keep the first range.
if i > j {
skip = true
break
}
} else if isSubset(perms[i], perms[j]) {
// if a range is a strict subset of the other one, we skip the subset.
skip = true
break
}
}
if skip {
continue
}
newp = append(newp, perms[i])
}
return newp
}
// mergeRangePerms merges adjacent rangePerms.
func mergeRangePerms(perms []*rangePerm) []*rangePerm {
merged := make([]*rangePerm, 0)
perms = removeSubsetRangePerms(perms)
sort.Sort(RangePermSliceByBegin(perms))
i := 0
for i < len(perms) {
begin, next := i, i
for next+1 < len(perms) && bytes.Compare(perms[next].end, perms[next+1].begin) != -1 {
next++
}
merged = append(merged, &rangePerm{begin: perms[begin].begin, end: perms[next].end})
i = next + 1
}
return merged
}
func getMergedPerms(tx backend.BatchTx, userName string) *unifiedRangePermissions {
user := getUser(tx, userName)
if user == nil {
plog.Errorf("invalid user name %s", userName)
return nil
}
var readPerms, writePerms []*rangePerm
for _, roleName := range user.Roles {
role := getRole(tx, roleName)
if role == nil {
continue
}
for _, perm := range role.KeyPermission {
rp := &rangePerm{begin: perm.Key, end: perm.RangeEnd}
switch perm.PermType {
case authpb.READWRITE:
readPerms = append(readPerms, rp)
writePerms = append(writePerms, rp)
case authpb.READ:
readPerms = append(readPerms, rp)
case authpb.WRITE:
writePerms = append(writePerms, rp)
}
}
}
return &unifiedRangePermissions{
readPerms: mergeRangePerms(readPerms),
writePerms: mergeRangePerms(writePerms),
}
}
func checkKeyPerm(cachedPerms *unifiedRangePermissions, key, rangeEnd []byte, permtyp authpb.Permission_Type) bool {
var tocheck []*rangePerm
switch permtyp {
case authpb.READ:
tocheck = cachedPerms.readPerms
case authpb.WRITE:
tocheck = cachedPerms.writePerms
default:
plog.Panicf("unknown auth type: %v", permtyp)
}
requiredPerm := &rangePerm{begin: key, end: rangeEnd}
for _, perm := range tocheck {
if isSubset(requiredPerm, perm) {
return true
}
}
return false
}
func (as *authStore) isRangeOpPermitted(tx backend.BatchTx, userName string, key, rangeEnd []byte, permtyp authpb.Permission_Type) bool {
// assumption: tx is Lock()ed
_, ok := as.rangePermCache[userName]
if !ok {
perms := getMergedPerms(tx, userName)
if perms == nil {
plog.Errorf("failed to create a unified permission of user %s", userName)
return false
}
as.rangePermCache[userName] = perms
}
return checkKeyPerm(as.rangePermCache[userName], key, rangeEnd, permtyp)
}
func (as *authStore) clearCachedPerm() {
as.rangePermCache = make(map[string]*unifiedRangePermissions)
}
func (as *authStore) invalidateCachedPerm(userName string) {
delete(as.rangePermCache, userName)
}
type unifiedRangePermissions struct {
// readPerms[i] and readPerms[j] (i != j) don't overlap
readPerms []*rangePerm
// writePerms[i] and writePerms[j] (i != j) don't overlap, too
writePerms []*rangePerm
}
type rangePerm struct {
begin, end []byte
}
type RangePermSliceByBegin []*rangePerm
func (slice RangePermSliceByBegin) Len() int {
return len(slice)
}
func (slice RangePermSliceByBegin) Less(i, j int) bool {
switch bytes.Compare(slice[i].begin, slice[j].begin) {
case 0: // begin(i) == begin(j)
return bytes.Compare(slice[i].end, slice[j].end) == -1
case -1: // begin(i) < begin(j)
return true
default:
return false
}
}
func (slice RangePermSliceByBegin) Swap(i, j int) {
slice[i], slice[j] = slice[j], slice[i]
}
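The merge step above is easiest to see with concrete ranges. The standalone sketch below mirrors the same idea on a local type (it does not reuse the unexported rangePerm helpers) and assumes subsets have already been removed, as removeSubsetRangePerms does before merging.

```go
package main

import (
	"bytes"
	"fmt"
	"sort"
)

// span mirrors the begin/end pair used by the range permission cache.
type span struct{ begin, end []byte }

// merge sorts spans by begin and coalesces any span whose end reaches the
// next span's begin, the same folding that mergeRangePerms performs.
func merge(spans []span) []span {
	sort.Slice(spans, func(i, j int) bool {
		return bytes.Compare(spans[i].begin, spans[j].begin) < 0
	})
	var out []span
	for i := 0; i < len(spans); {
		next := i
		for next+1 < len(spans) && bytes.Compare(spans[next].end, spans[next+1].begin) != -1 {
			next++
		}
		out = append(out, span{begin: spans[i].begin, end: spans[next].end})
		i = next + 1
	}
	return out
}

func main() {
	got := merge([]span{
		{begin: []byte("a"), end: []byte("c")},
		{begin: []byte("b"), end: []byte("d")}, // overlaps the previous span
		{begin: []byte("x"), end: []byte("z")}, // disjoint, stays separate
	})
	for _, s := range got {
		fmt.Printf("[%s, %s)\n", s.begin, s.end) // prints [a, d) and [x, z)
	}
}
```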

55
vendor/github.com/coreos/etcd/auth/simple_token.go generated vendored Normal file
View File

@ -0,0 +1,55 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package auth
// CAUTION: This random-number-based token mechanism is only for testing purposes.
// A JWT-based mechanism will be added in the near future.
import (
"crypto/rand"
"math/big"
)
const (
letters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
defaultSimpleTokenLength = 16
)
func (as *authStore) GenSimpleToken() (string, error) {
ret := make([]byte, defaultSimpleTokenLength)
for i := 0; i < defaultSimpleTokenLength; i++ {
bInt, err := rand.Int(rand.Reader, big.NewInt(int64(len(letters))))
if err != nil {
return "", err
}
ret[i] = letters[bInt.Int64()]
}
return string(ret), nil
}
func (as *authStore) assignSimpleTokenToUser(username, token string) {
as.simpleTokensMu.Lock()
_, ok := as.simpleTokens[token]
if ok {
plog.Panicf("token %s is alredy used", token)
}
as.simpleTokens[token] = username
as.simpleTokensMu.Unlock()
}

View File

@ -1,4 +1,4 @@
// Copyright 2016 Nippon Telegraph and Telephone Corporation.
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -15,17 +15,25 @@
package auth
import (
"bytes"
"errors"
"fmt"
"sort"
"strings"
"sync"
"github.com/coreos/etcd/auth/authpb"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"github.com/coreos/etcd/storage/backend"
"github.com/coreos/etcd/mvcc/backend"
"github.com/coreos/pkg/capnslog"
"golang.org/x/crypto/bcrypt"
"golang.org/x/net/context"
)
var (
enableFlagKey = []byte("authEnabled")
authEnabled = []byte{1}
authDisabled = []byte{0}
authBucketName = []byte("auth")
authUsersBucketName = []byte("authUsers")
@ -33,14 +41,32 @@ var (
plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "auth")
ErrUserAlreadyExist = errors.New("auth: user already exists")
ErrUserNotFound = errors.New("auth: user not found")
ErrRoleAlreadyExist = errors.New("auth: role already exists")
ErrRootUserNotExist = errors.New("auth: root user does not exist")
ErrRootRoleNotExist = errors.New("auth: root user does not have root role")
ErrUserAlreadyExist = errors.New("auth: user already exists")
ErrUserNotFound = errors.New("auth: user not found")
ErrRoleAlreadyExist = errors.New("auth: role already exists")
ErrRoleNotFound = errors.New("auth: role not found")
ErrAuthFailed = errors.New("auth: authentication failed, invalid user ID or password")
ErrPermissionDenied = errors.New("auth: permission denied")
ErrRoleNotGranted = errors.New("auth: role is not granted to the user")
ErrPermissionNotGranted = errors.New("auth: permission is not granted to the role")
)
const (
rootUser = "root"
rootRole = "root"
)
type AuthStore interface {
// AuthEnable() turns on the authentication feature
AuthEnable()
// AuthEnable turns on the authentication feature
AuthEnable() error
// AuthDisable turns off the authentication feature
AuthDisable()
// Authenticate does authentication based on given user name and password
Authenticate(ctx context.Context, username, password string) (*pb.AuthenticateResponse, error)
// Recover recovers the state of auth store from the given backend
Recover(b backend.Backend)
@ -54,35 +80,157 @@ type AuthStore interface {
// UserChangePassword changes a password of a user
UserChangePassword(r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error)
// UserGrantRole grants a role to the user
UserGrantRole(r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error)
// UserGet gets the detailed information of a user
UserGet(r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error)
// UserRevokeRole revokes a role of a user
UserRevokeRole(r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error)
// RoleAdd adds a new role
RoleAdd(r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error)
// RoleGrantPermission grants a permission to a role
RoleGrantPermission(r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error)
// RoleGet gets the detailed information of a role
RoleGet(r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error)
// RoleRevokePermission revokes a permission from a role
RoleRevokePermission(r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error)
// RoleDelete deletes a role
RoleDelete(r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error)
// UserList gets a list of all users
UserList(r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error)
// RoleList gets a list of all roles
RoleList(r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error)
// UsernameFromToken gets a username from the given Token
UsernameFromToken(token string) (string, bool)
// IsPutPermitted checks put permission of the user
IsPutPermitted(username string, key []byte) bool
// IsRangePermitted checks range permission of the user
IsRangePermitted(username string, key, rangeEnd []byte) bool
// IsDeleteRangePermitted checks delete-range permission of the user
IsDeleteRangePermitted(username string, key, rangeEnd []byte) bool
// IsAdminPermitted checks admin permission of the user
IsAdminPermitted(username string) bool
// GenSimpleToken produces a simple random string
GenSimpleToken() (string, error)
}
type authStore struct {
be backend.Backend
be backend.Backend
enabled bool
enabledMu sync.RWMutex
rangePermCache map[string]*unifiedRangePermissions // username -> unifiedRangePermissions
simpleTokensMu sync.RWMutex
simpleTokens map[string]string // token -> username
}
func (as *authStore) AuthEnable() {
value := []byte{1}
func (as *authStore) AuthEnable() error {
b := as.be
tx := b.BatchTx()
tx.Lock()
tx.UnsafePut(authBucketName, enableFlagKey, value)
defer func() {
tx.Unlock()
b.ForceCommit()
}()
u := getUser(tx, rootUser)
if u == nil {
return ErrRootUserNotExist
}
if !hasRootRole(u) {
return ErrRootRoleNotExist
}
tx.UnsafePut(authBucketName, enableFlagKey, authEnabled)
as.enabledMu.Lock()
as.enabled = true
as.enabledMu.Unlock()
as.rangePermCache = make(map[string]*unifiedRangePermissions)
plog.Noticef("Authentication enabled")
return nil
}
func (as *authStore) AuthDisable() {
b := as.be
tx := b.BatchTx()
tx.Lock()
tx.UnsafePut(authBucketName, enableFlagKey, authDisabled)
tx.Unlock()
b.ForceCommit()
plog.Noticef("Authentication enabled")
as.enabledMu.Lock()
as.enabled = false
as.enabledMu.Unlock()
plog.Noticef("Authentication disabled")
}
func (as *authStore) Authenticate(ctx context.Context, username, password string) (*pb.AuthenticateResponse, error) {
// TODO(mitake): after adding jwt support, branching based on values of ctx is required
index := ctx.Value("index").(uint64)
simpleToken := ctx.Value("simpleToken").(string)
tx := as.be.BatchTx()
tx.Lock()
defer tx.Unlock()
user := getUser(tx, username)
if user == nil {
return nil, ErrAuthFailed
}
if bcrypt.CompareHashAndPassword(user.Password, []byte(password)) != nil {
plog.Noticef("authentication failed, invalid password for user %s", username)
return &pb.AuthenticateResponse{}, ErrAuthFailed
}
token := fmt.Sprintf("%s.%d", simpleToken, index)
as.assignSimpleTokenToUser(username, token)
plog.Infof("authorized %s, token is %s", username, token)
return &pb.AuthenticateResponse{Token: token}, nil
}
func (as *authStore) Recover(be backend.Backend) {
enabled := false
as.be = be
// TODO(mitake): recovery process
tx := be.BatchTx()
tx.Lock()
_, vs := tx.UnsafeRange(authBucketName, enableFlagKey, nil, 0)
if len(vs) == 1 {
if bytes.Equal(vs[0], authEnabled) {
enabled = true
}
}
tx.Unlock()
as.enabledMu.Lock()
as.enabled = enabled
as.enabledMu.Unlock()
}
func (as *authStore) UserAdd(r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) {
plog.Noticef("adding a new user: %s", r.Name)
hashed, err := bcrypt.GenerateFromPassword([]byte(r.Password), bcrypt.DefaultCost)
if err != nil {
plog.Errorf("failed to hash password: %s", err)
@ -93,23 +241,17 @@ func (as *authStore) UserAdd(r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse,
tx.Lock()
defer tx.Unlock()
_, vs := tx.UnsafeRange(authUsersBucketName, []byte(r.Name), nil, 0)
if len(vs) != 0 {
return &pb.AuthUserAddResponse{}, ErrUserAlreadyExist
user := getUser(tx, r.Name)
if user != nil {
return nil, ErrUserAlreadyExist
}
newUser := authpb.User{
newUser := &authpb.User{
Name: []byte(r.Name),
Password: hashed,
}
marshaledUser, merr := newUser.Marshal()
if merr != nil {
plog.Errorf("failed to marshal a new user data: %s", merr)
return nil, merr
}
tx.UnsafePut(authUsersBucketName, []byte(r.Name), marshaledUser)
putUser(tx, newUser)
plog.Noticef("added a new user: %s", r.Name)
@ -121,12 +263,12 @@ func (as *authStore) UserDelete(r *pb.AuthUserDeleteRequest) (*pb.AuthUserDelete
tx.Lock()
defer tx.Unlock()
_, vs := tx.UnsafeRange(authUsersBucketName, []byte(r.Name), nil, 0)
if len(vs) != 1 {
return &pb.AuthUserDeleteResponse{}, ErrUserNotFound
user := getUser(tx, r.Name)
if user == nil {
return nil, ErrUserNotFound
}
tx.UnsafeDelete(authUsersBucketName, []byte(r.Name))
delUser(tx, r.Name)
plog.Noticef("deleted a user: %s", r.Name)
@ -146,36 +288,230 @@ func (as *authStore) UserChangePassword(r *pb.AuthUserChangePasswordRequest) (*p
tx.Lock()
defer tx.Unlock()
_, vs := tx.UnsafeRange(authUsersBucketName, []byte(r.Name), nil, 0)
if len(vs) != 1 {
return &pb.AuthUserChangePasswordResponse{}, ErrUserNotFound
user := getUser(tx, r.Name)
if user == nil {
return nil, ErrUserNotFound
}
updatedUser := authpb.User{
updatedUser := &authpb.User{
Name: []byte(r.Name),
Roles: user.Roles,
Password: hashed,
}
marshaledUser, merr := updatedUser.Marshal()
if merr != nil {
plog.Errorf("failed to marshal a new user data: %s", merr)
return nil, merr
}
tx.UnsafePut(authUsersBucketName, []byte(r.Name), marshaledUser)
putUser(tx, updatedUser)
plog.Noticef("changed a password of a user: %s", r.Name)
return &pb.AuthUserChangePasswordResponse{}, nil
}
func (as *authStore) UserGrantRole(r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) {
tx := as.be.BatchTx()
tx.Lock()
defer tx.Unlock()
user := getUser(tx, r.User)
if user == nil {
return nil, ErrUserNotFound
}
if r.Role != rootRole {
role := getRole(tx, r.Role)
if role == nil {
return nil, ErrRoleNotFound
}
}
idx := sort.SearchStrings(user.Roles, r.Role)
if idx < len(user.Roles) && strings.Compare(user.Roles[idx], r.Role) == 0 {
plog.Warningf("user %s is already granted role %s", r.User, r.Role)
return &pb.AuthUserGrantRoleResponse{}, nil
}
user.Roles = append(user.Roles, r.Role)
sort.Sort(sort.StringSlice(user.Roles))
putUser(tx, user)
as.invalidateCachedPerm(r.User)
plog.Noticef("granted role %s to user %s", r.Role, r.User)
return &pb.AuthUserGrantRoleResponse{}, nil
}
func (as *authStore) UserGet(r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) {
tx := as.be.BatchTx()
tx.Lock()
defer tx.Unlock()
var resp pb.AuthUserGetResponse
user := getUser(tx, r.Name)
if user == nil {
return nil, ErrUserNotFound
}
for _, role := range user.Roles {
resp.Roles = append(resp.Roles, role)
}
return &resp, nil
}
func (as *authStore) UserList(r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) {
tx := as.be.BatchTx()
tx.Lock()
defer tx.Unlock()
var resp pb.AuthUserListResponse
users := getAllUsers(tx)
for _, u := range users {
resp.Users = append(resp.Users, string(u.Name))
}
return &resp, nil
}
func (as *authStore) UserRevokeRole(r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) {
tx := as.be.BatchTx()
tx.Lock()
defer tx.Unlock()
user := getUser(tx, r.Name)
if user == nil {
return nil, ErrUserNotFound
}
updatedUser := &authpb.User{
Name: user.Name,
Password: user.Password,
}
for _, role := range user.Roles {
if strings.Compare(role, r.Role) != 0 {
updatedUser.Roles = append(updatedUser.Roles, role)
}
}
if len(updatedUser.Roles) == len(user.Roles) {
return nil, ErrRoleNotGranted
}
putUser(tx, updatedUser)
as.invalidateCachedPerm(r.Name)
plog.Noticef("revoked role %s from user %s", r.Role, r.Name)
return &pb.AuthUserRevokeRoleResponse{}, nil
}
func (as *authStore) RoleGet(r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) {
tx := as.be.BatchTx()
tx.Lock()
defer tx.Unlock()
var resp pb.AuthRoleGetResponse
role := getRole(tx, r.Role)
if role == nil {
return nil, ErrRoleNotFound
}
for _, perm := range role.KeyPermission {
resp.Perm = append(resp.Perm, perm)
}
return &resp, nil
}
func (as *authStore) RoleList(r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) {
tx := as.be.BatchTx()
tx.Lock()
defer tx.Unlock()
var resp pb.AuthRoleListResponse
roles := getAllRoles(tx)
for _, r := range roles {
resp.Roles = append(resp.Roles, string(r.Name))
}
return &resp, nil
}
func (as *authStore) RoleRevokePermission(r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) {
tx := as.be.BatchTx()
tx.Lock()
defer tx.Unlock()
role := getRole(tx, r.Role)
if role == nil {
return nil, ErrRoleNotFound
}
updatedRole := &authpb.Role{
Name: role.Name,
}
for _, perm := range role.KeyPermission {
if !bytes.Equal(perm.Key, []byte(r.Key)) || !bytes.Equal(perm.RangeEnd, []byte(r.RangeEnd)) {
updatedRole.KeyPermission = append(updatedRole.KeyPermission, perm)
}
}
if len(role.KeyPermission) == len(updatedRole.KeyPermission) {
return nil, ErrPermissionNotGranted
}
putRole(tx, updatedRole)
// TODO(mitake): currently single role update invalidates every cache
// It should be optimized.
as.clearCachedPerm()
plog.Noticef("revoked key %s from role %s", r.Key, r.Role)
return &pb.AuthRoleRevokePermissionResponse{}, nil
}
func (as *authStore) RoleDelete(r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) {
// TODO(mitake): current scheme of role deletion allows existing users to have the deleted roles
//
// Assume a case like below:
// create a role r1
// create a user u1 and grant r1 to u1
// delete r1
//
// After this sequence, u1 is still granted the role r1. So if an admin creates a new role with the name r1,
// the new r1 is automatically granted to u1.
// In some cases, it would be confusing. So we need to provide an option for deleting the grant relation
// from all users.
tx := as.be.BatchTx()
tx.Lock()
defer tx.Unlock()
role := getRole(tx, r.Role)
if role == nil {
return nil, ErrRoleNotFound
}
delRole(tx, r.Role)
plog.Noticef("deleted role %s", r.Role)
return &pb.AuthRoleDeleteResponse{}, nil
}
func (as *authStore) RoleAdd(r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) {
tx := as.be.BatchTx()
tx.Lock()
defer tx.Unlock()
_, vs := tx.UnsafeRange(authRolesBucketName, []byte(r.Name), nil, 0)
if len(vs) != 0 {
role := getRole(tx, r.Name)
if role != nil {
return nil, ErrRoleAlreadyExist
}
@ -183,18 +519,227 @@ func (as *authStore) RoleAdd(r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse,
Name: []byte(r.Name),
}
marshaledRole, err := newRole.Marshal()
if err != nil {
return nil, err
}
tx.UnsafePut(authRolesBucketName, []byte(r.Name), marshaledRole)
putRole(tx, newRole)
plog.Noticef("Role %s is created", r.Name)
return &pb.AuthRoleAddResponse{}, nil
}
func (as *authStore) UsernameFromToken(token string) (string, bool) {
as.simpleTokensMu.RLock()
defer as.simpleTokensMu.RUnlock()
t, ok := as.simpleTokens[token]
return t, ok
}
type permSlice []*authpb.Permission
func (perms permSlice) Len() int {
return len(perms)
}
func (perms permSlice) Less(i, j int) bool {
return bytes.Compare(perms[i].Key, perms[j].Key) < 0
}
func (perms permSlice) Swap(i, j int) {
perms[i], perms[j] = perms[j], perms[i]
}
func (as *authStore) RoleGrantPermission(r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) {
tx := as.be.BatchTx()
tx.Lock()
defer tx.Unlock()
role := getRole(tx, r.Name)
if role == nil {
return nil, ErrRoleNotFound
}
idx := sort.Search(len(role.KeyPermission), func(i int) bool {
return bytes.Compare(role.KeyPermission[i].Key, []byte(r.Perm.Key)) >= 0
})
if idx < len(role.KeyPermission) && bytes.Equal(role.KeyPermission[idx].Key, r.Perm.Key) && bytes.Equal(role.KeyPermission[idx].RangeEnd, r.Perm.RangeEnd) {
// update existing permission
role.KeyPermission[idx].PermType = r.Perm.PermType
} else {
// append new permission to the role
newPerm := &authpb.Permission{
Key: []byte(r.Perm.Key),
RangeEnd: []byte(r.Perm.RangeEnd),
PermType: r.Perm.PermType,
}
role.KeyPermission = append(role.KeyPermission, newPerm)
sort.Sort(permSlice(role.KeyPermission))
}
putRole(tx, role)
// TODO(mitake): currently single role update invalidates every cache
// It should be optimized.
as.clearCachedPerm()
plog.Noticef("role %s's permission of key %s is updated as %s", r.Name, r.Perm.Key, authpb.Permission_Type_name[int32(r.Perm.PermType)])
return &pb.AuthRoleGrantPermissionResponse{}, nil
}
func (as *authStore) isOpPermitted(userName string, key, rangeEnd []byte, permTyp authpb.Permission_Type) bool {
// TODO(mitake): this function would be costly so we need a caching mechanism
if !as.isAuthEnabled() {
return true
}
tx := as.be.BatchTx()
tx.Lock()
defer tx.Unlock()
user := getUser(tx, userName)
if user == nil {
plog.Errorf("invalid user name %s for permission checking", userName)
return false
}
if as.isRangeOpPermitted(tx, userName, key, rangeEnd, permTyp) {
return true
}
return false
}
func (as *authStore) IsPutPermitted(username string, key []byte) bool {
return as.isOpPermitted(username, key, nil, authpb.WRITE)
}
func (as *authStore) IsRangePermitted(username string, key, rangeEnd []byte) bool {
return as.isOpPermitted(username, key, rangeEnd, authpb.READ)
}
func (as *authStore) IsDeleteRangePermitted(username string, key, rangeEnd []byte) bool {
return as.isOpPermitted(username, key, rangeEnd, authpb.WRITE)
}
func (as *authStore) IsAdminPermitted(username string) bool {
if !as.isAuthEnabled() {
return true
}
tx := as.be.BatchTx()
tx.Lock()
defer tx.Unlock()
u := getUser(tx, username)
if u == nil {
return false
}
return hasRootRole(u)
}
func getUser(tx backend.BatchTx, username string) *authpb.User {
_, vs := tx.UnsafeRange(authUsersBucketName, []byte(username), nil, 0)
if len(vs) == 0 {
return nil
}
user := &authpb.User{}
err := user.Unmarshal(vs[0])
if err != nil {
plog.Panicf("failed to unmarshal user struct (name: %s): %s", username, err)
}
return user
}
func getAllUsers(tx backend.BatchTx) []*authpb.User {
_, vs := tx.UnsafeRange(authUsersBucketName, []byte{0}, []byte{0xff}, -1)
if len(vs) == 0 {
return nil
}
var users []*authpb.User
for _, v := range vs {
user := &authpb.User{}
err := user.Unmarshal(v)
if err != nil {
plog.Panicf("failed to unmarshal user struct: %s", err)
}
users = append(users, user)
}
return users
}
func putUser(tx backend.BatchTx, user *authpb.User) {
b, err := user.Marshal()
if err != nil {
plog.Panicf("failed to marshal user struct (name: %s): %s", user.Name, err)
}
tx.UnsafePut(authUsersBucketName, user.Name, b)
}
func delUser(tx backend.BatchTx, username string) {
tx.UnsafeDelete(authUsersBucketName, []byte(username))
}
func getRole(tx backend.BatchTx, rolename string) *authpb.Role {
_, vs := tx.UnsafeRange(authRolesBucketName, []byte(rolename), nil, 0)
if len(vs) == 0 {
return nil
}
role := &authpb.Role{}
err := role.Unmarshal(vs[0])
if err != nil {
plog.Panicf("failed to unmarshal role struct (name: %s): %s", rolename, err)
}
return role
}
func getAllRoles(tx backend.BatchTx) []*authpb.Role {
_, vs := tx.UnsafeRange(authRolesBucketName, []byte{0}, []byte{0xff}, -1)
if len(vs) == 0 {
return nil
}
var roles []*authpb.Role
for _, v := range vs {
role := &authpb.Role{}
err := role.Unmarshal(v)
if err != nil {
plog.Panicf("failed to unmarshal role struct: %s", err)
}
roles = append(roles, role)
}
return roles
}
func putRole(tx backend.BatchTx, role *authpb.Role) {
b, err := role.Marshal()
if err != nil {
plog.Panicf("failed to marshal role struct (name: %s): %s", role.Name, err)
}
tx.UnsafePut(authRolesBucketName, []byte(role.Name), b)
}
func delRole(tx backend.BatchTx, rolename string) {
tx.UnsafeDelete(authRolesBucketName, []byte(rolename))
}
func (as *authStore) isAuthEnabled() bool {
as.enabledMu.RLock()
defer as.enabledMu.RUnlock()
return as.enabled
}
func NewAuthStore(be backend.Backend) *authStore {
tx := be.BatchTx()
tx.Lock()
@ -207,6 +752,16 @@ func NewAuthStore(be backend.Backend) *authStore {
be.ForceCommit()
return &authStore{
be: be,
be: be,
simpleTokens: make(map[string]string),
}
}
func hasRootRole(u *authpb.User) bool {
for _, r := range u.Roles {
if r == rootRole {
return true
}
}
return false
}

View File

@ -4,9 +4,11 @@ etcd/client is the Go client library for etcd.
[![GoDoc](https://godoc.org/github.com/coreos/etcd/client?status.png)](https://godoc.org/github.com/coreos/etcd/client)
etcd uses go's `vendor` directory to manage external dependencies. If `client` is imported
outside of etcd, simply copy `client` to the `vendor` directory or use tools like godep to
manage your own dependency, as in [vendor directories](https://golang.org/cmd/go/#hdr-Vendor_Directories).
etcd uses `cmd/vendor` directory to store external dependencies, which are
to be compiled into etcd release binaries. `client` can be imported without
vendoring. For full compatibility, it is recommended that builds vendor
etcd's vendored packages using tools like godep, as in
[vendor directories](https://golang.org/cmd/go/#hdr-Vendor_Directories).
For more detail, please read [Go vendor design](https://golang.org/s/go15vendor).
## Install

View File

@ -1,4 +1,4 @@
// Copyright 2015 CoreOS, Inc.
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.

View File

@ -1,4 +1,4 @@
// Copyright 2015 CoreOS, Inc.
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -36,6 +36,12 @@ type User struct {
Revoke []string `json:"revoke,omitempty"`
}
// userListEntry is the user representation given by the server for ListUsers
type userListEntry struct {
User string `json:"user"`
Roles []Role `json:"roles"`
}
type UserRoles struct {
User string `json:"user"`
Roles []Role `json:"roles"`
@ -194,7 +200,7 @@ func (u *httpAuthUserAPI) ListUsers(ctx context.Context) ([]string, error) {
}
var userList struct {
Users []User `json:"users"`
Users []userListEntry `json:"users"`
}
if err = json.Unmarshal(body, &userList); err != nil {

View File

@ -1,4 +1,4 @@
// Copyright 2015 CoreOS, Inc.
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -37,6 +37,10 @@ var (
ErrClusterUnavailable = errors.New("client: etcd cluster is unavailable or misconfigured")
ErrNoLeaderEndpoint = errors.New("client: no leader endpoint available")
errTooManyRedirectChecks = errors.New("client: too many redirect checks")
// oneShotCtxValue is set on a context using WithValue(&oneShotCtxValue) so
// that Do() will not retry a request
oneShotCtxValue interface{}
)
var DefaultRequestTimeout = 5 * time.Second
@ -335,6 +339,7 @@ func (c *httpClusterClient) Do(ctx context.Context, act httpAction) (*http.Respo
var body []byte
var err error
cerr := &ClusterError{}
isOneShot := ctx.Value(&oneShotCtxValue) != nil
for i := pinned; i < leps+pinned; i++ {
k := i % leps
@ -348,6 +353,9 @@ func (c *httpClusterClient) Do(ctx context.Context, act httpAction) (*http.Respo
if err == context.Canceled || err == context.DeadlineExceeded {
return nil, nil, err
}
if isOneShot {
return nil, nil, err
}
continue
}
if resp.StatusCode/100 == 5 {
@ -358,6 +366,9 @@ func (c *httpClusterClient) Do(ctx context.Context, act httpAction) (*http.Respo
default:
cerr.Errors = append(cerr.Errors, fmt.Errorf("client: etcd member %s returns server error [%s]", eps[k].String(), http.StatusText(resp.StatusCode)))
}
if isOneShot {
return nil, nil, cerr.Errors[0]
}
continue
}
if k != pinned {

View File

@ -1,4 +1,4 @@
// Copyright 2015 CoreOS, Inc.
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.

View File

@ -1,4 +1,4 @@
// Copyright 2015 CoreOS, Inc.
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.

View File

@ -1,4 +1,4 @@
// Copyright 2015 CoreOS, Inc.
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.

View File

@ -1,4 +1,4 @@
// Copyright 2015 CoreOS, Inc.
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.

View File

@ -1,4 +1,4 @@
// Copyright 2015 CoreOS, Inc.
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -337,7 +337,11 @@ func (k *httpKeysAPI) Set(ctx context.Context, key, val string, opts *SetOptions
act.Dir = opts.Dir
}
resp, body, err := k.client.Do(ctx, act)
doCtx := ctx
if act.PrevExist == PrevNoExist {
doCtx = context.WithValue(doCtx, &oneShotCtxValue, &oneShotCtxValue)
}
resp, body, err := k.client.Do(doCtx, act)
if err != nil {
return nil, err
}
@ -385,7 +389,8 @@ func (k *httpKeysAPI) Delete(ctx context.Context, key string, opts *DeleteOption
act.Recursive = opts.Recursive
}
resp, body, err := k.client.Do(ctx, act)
doCtx := context.WithValue(ctx, &oneShotCtxValue, &oneShotCtxValue)
resp, body, err := k.client.Do(doCtx, act)
if err != nil {
return nil, err
}
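The one-shot behavior above is driven purely by a sentinel context value. A stripped-down sketch of the same pattern with a local sentinel follows (oneShotCtxValue itself is unexported); the endpoints and the simulated error are made up for illustration.

```go
package main

import (
	"fmt"

	"golang.org/x/net/context"
)

// oneShotKey plays the role of the unexported oneShotCtxValue sentinel:
// its address is used as both the context key and the value.
var oneShotKey interface{}

func do(ctx context.Context, endpoints []string) error {
	isOneShot := ctx.Value(&oneShotKey) != nil
	var lastErr error
	for _, ep := range endpoints {
		lastErr = fmt.Errorf("member %s unavailable", ep) // stand-in for a failed request
		if isOneShot {
			// non-idempotent request: surface the first failure instead of
			// retrying against another endpoint
			return lastErr
		}
		// idempotent request: fall through and try the next endpoint
	}
	return lastErr
}

func main() {
	ctx := context.WithValue(context.Background(), &oneShotKey, &oneShotKey)
	fmt.Println(do(ctx, []string{"http://10.0.0.1:2379", "http://10.0.0.2:2379"}))
}
```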

View File

@ -1,4 +1,4 @@
// Copyright 2015 CoreOS, Inc.
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.

View File

@ -1,4 +1,4 @@
// Copyright 2015 CoreOS, Inc.
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.

View File

@ -1,4 +1,4 @@
// Copyright 2016 CoreOS, Inc.
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.

View File

@ -40,9 +40,11 @@ if err != nil {
// use the response
```
etcd uses go's `vendor` directory to manage external dependencies. If `clientv3` is imported
outside of etcd, simply copy `clientv3` to the `vendor` directory or use tools like godep to
manage your own dependency, as in [vendor directories](https://golang.org/cmd/go/#hdr-Vendor_Directories).
etcd uses `cmd/vendor` directory to store external dependencies, which are
to be compiled into etcd release binaries. `clientv3` can be imported without
vendoring. For full compatibility, it is recommended that builds vendor
etcd's vendored packages using tools like godep, as in
[vendor directories](https://golang.org/cmd/go/#hdr-Vendor_Directories).
For more detail, please read [Go vendor design](https://golang.org/s/go15vendor).
## Error Handling
@ -50,21 +52,22 @@ For more detail, please read [Go vendor design](https://golang.org/s/go15vendor)
etcd client returns 2 types of errors:
1. context error: canceled or deadline exceeded.
2. gRPC error: see [v3rpc/error](https://github.com/coreos/etcd/blob/master/etcdserver/api/v3rpc/error.go).
2. gRPC error: see [api/v3rpc/rpctypes](https://godoc.org/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes).
Here is the example code to handle client errors:
```go
resp, err := kvc.Put(ctx, "", "")
if err != nil {
if err == context.Canceled {
// ctx is canceled by another routine
} else if err == context.DeadlineExceeded {
// ctx is attached with a deadline and it exceeded
} else if verr, ok := err.(*v3rpc.ErrEmptyKey); ok {
// process (verr.Errors)
} else {
// bad cluster endpoints, which are not etcd servers
switch err {
case context.Canceled:
log.Fatalf("ctx is canceled by another routine: %v", err)
case context.DeadlineExceeded:
log.Fatalf("ctx is attached with a deadline is exceeded: %v", err)
case rpctypes.ErrEmptyKey:
log.Fatalf("client-side error: %v", err)
default:
log.Fatalf("bad cluster endpoints, which are not etcd servers: %v", err)
}
}
```

View File

@ -1,4 +1,4 @@
// Copyright 2016 Nippon Telegraph and Telephone Corporation.
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -15,23 +15,49 @@
package clientv3
import (
"fmt"
"strings"
"github.com/coreos/etcd/auth/authpb"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"golang.org/x/net/context"
"google.golang.org/grpc"
)
type (
AuthEnableResponse pb.AuthEnableResponse
AuthUserAddResponse pb.AuthUserAddResponse
AuthUserDeleteResponse pb.AuthUserDeleteResponse
AuthUserChangePasswordResponse pb.AuthUserChangePasswordResponse
AuthRoleAddResponse pb.AuthRoleAddResponse
AuthEnableResponse pb.AuthEnableResponse
AuthDisableResponse pb.AuthDisableResponse
AuthenticateResponse pb.AuthenticateResponse
AuthUserAddResponse pb.AuthUserAddResponse
AuthUserDeleteResponse pb.AuthUserDeleteResponse
AuthUserChangePasswordResponse pb.AuthUserChangePasswordResponse
AuthUserGrantRoleResponse pb.AuthUserGrantRoleResponse
AuthUserGetResponse pb.AuthUserGetResponse
AuthUserRevokeRoleResponse pb.AuthUserRevokeRoleResponse
AuthRoleAddResponse pb.AuthRoleAddResponse
AuthRoleGrantPermissionResponse pb.AuthRoleGrantPermissionResponse
AuthRoleGetResponse pb.AuthRoleGetResponse
AuthRoleRevokePermissionResponse pb.AuthRoleRevokePermissionResponse
AuthRoleDeleteResponse pb.AuthRoleDeleteResponse
AuthUserListResponse pb.AuthUserListResponse
AuthRoleListResponse pb.AuthRoleListResponse
PermissionType authpb.Permission_Type
)
const (
PermRead = authpb.READ
PermWrite = authpb.WRITE
PermReadWrite = authpb.READWRITE
)
type Auth interface {
// AuthEnable enables auth of an etcd cluster.
AuthEnable(ctx context.Context) (*AuthEnableResponse, error)
// AuthDisable disables auth of an etcd cluster.
AuthDisable(ctx context.Context) (*AuthDisableResponse, error)
// UserAdd adds a new user to an etcd cluster.
UserAdd(ctx context.Context, name string, password string) (*AuthUserAddResponse, error)
@ -41,8 +67,35 @@ type Auth interface {
// UserChangePassword changes a password of a user.
UserChangePassword(ctx context.Context, name string, password string) (*AuthUserChangePasswordResponse, error)
// RoleAdd adds a new user to an etcd cluster.
// UserGrantRole grants a role to a user.
UserGrantRole(ctx context.Context, user string, role string) (*AuthUserGrantRoleResponse, error)
// UserGet gets detailed information about a user.
UserGet(ctx context.Context, name string) (*AuthUserGetResponse, error)
// UserList gets a list of all users.
UserList(ctx context.Context) (*AuthUserListResponse, error)
// UserRevokeRole revokes a role of a user.
UserRevokeRole(ctx context.Context, name string, role string) (*AuthUserRevokeRoleResponse, error)
// RoleAdd adds a new role to an etcd cluster.
RoleAdd(ctx context.Context, name string) (*AuthRoleAddResponse, error)
// RoleGrantPermission grants a permission to a role.
RoleGrantPermission(ctx context.Context, name string, key, rangeEnd string, permType PermissionType) (*AuthRoleGrantPermissionResponse, error)
// RoleGet gets detailed information about a role.
RoleGet(ctx context.Context, role string) (*AuthRoleGetResponse, error)
// RoleList gets a list of all roles.
RoleList(ctx context.Context) (*AuthRoleListResponse, error)
// RoleRevokePermission revokes a permission from a role.
RoleRevokePermission(ctx context.Context, role string, key, rangeEnd string) (*AuthRoleRevokePermissionResponse, error)
// RoleDelete deletes a role.
RoleDelete(ctx context.Context, role string) (*AuthRoleDeleteResponse, error)
}
type auth struct {
@ -62,26 +115,115 @@ func NewAuth(c *Client) Auth {
}
func (auth *auth) AuthEnable(ctx context.Context) (*AuthEnableResponse, error) {
resp, err := auth.remote.AuthEnable(ctx, &pb.AuthEnableRequest{})
return (*AuthEnableResponse)(resp), err
resp, err := auth.remote.AuthEnable(ctx, &pb.AuthEnableRequest{}, grpc.FailFast(false))
return (*AuthEnableResponse)(resp), toErr(ctx, err)
}
func (auth *auth) AuthDisable(ctx context.Context) (*AuthDisableResponse, error) {
resp, err := auth.remote.AuthDisable(ctx, &pb.AuthDisableRequest{}, grpc.FailFast(false))
return (*AuthDisableResponse)(resp), toErr(ctx, err)
}
func (auth *auth) UserAdd(ctx context.Context, name string, password string) (*AuthUserAddResponse, error) {
resp, err := auth.remote.UserAdd(ctx, &pb.AuthUserAddRequest{Name: name, Password: password})
return (*AuthUserAddResponse)(resp), err
resp, err := auth.remote.UserAdd(ctx, &pb.AuthUserAddRequest{Name: name, Password: password}, grpc.FailFast(false))
return (*AuthUserAddResponse)(resp), toErr(ctx, err)
}
func (auth *auth) UserDelete(ctx context.Context, name string) (*AuthUserDeleteResponse, error) {
resp, err := auth.remote.UserDelete(ctx, &pb.AuthUserDeleteRequest{Name: name})
return (*AuthUserDeleteResponse)(resp), err
resp, err := auth.remote.UserDelete(ctx, &pb.AuthUserDeleteRequest{Name: name}, grpc.FailFast(false))
return (*AuthUserDeleteResponse)(resp), toErr(ctx, err)
}
func (auth *auth) UserChangePassword(ctx context.Context, name string, password string) (*AuthUserChangePasswordResponse, error) {
resp, err := auth.remote.UserChangePassword(ctx, &pb.AuthUserChangePasswordRequest{Name: name, Password: password})
return (*AuthUserChangePasswordResponse)(resp), err
resp, err := auth.remote.UserChangePassword(ctx, &pb.AuthUserChangePasswordRequest{Name: name, Password: password}, grpc.FailFast(false))
return (*AuthUserChangePasswordResponse)(resp), toErr(ctx, err)
}
func (auth *auth) UserGrantRole(ctx context.Context, user string, role string) (*AuthUserGrantRoleResponse, error) {
resp, err := auth.remote.UserGrantRole(ctx, &pb.AuthUserGrantRoleRequest{User: user, Role: role}, grpc.FailFast(false))
return (*AuthUserGrantRoleResponse)(resp), toErr(ctx, err)
}
func (auth *auth) UserGet(ctx context.Context, name string) (*AuthUserGetResponse, error) {
resp, err := auth.remote.UserGet(ctx, &pb.AuthUserGetRequest{Name: name}, grpc.FailFast(false))
return (*AuthUserGetResponse)(resp), toErr(ctx, err)
}
func (auth *auth) UserList(ctx context.Context) (*AuthUserListResponse, error) {
resp, err := auth.remote.UserList(ctx, &pb.AuthUserListRequest{}, grpc.FailFast(false))
return (*AuthUserListResponse)(resp), toErr(ctx, err)
}
func (auth *auth) UserRevokeRole(ctx context.Context, name string, role string) (*AuthUserRevokeRoleResponse, error) {
resp, err := auth.remote.UserRevokeRole(ctx, &pb.AuthUserRevokeRoleRequest{Name: name, Role: role}, grpc.FailFast(false))
return (*AuthUserRevokeRoleResponse)(resp), toErr(ctx, err)
}
func (auth *auth) RoleAdd(ctx context.Context, name string) (*AuthRoleAddResponse, error) {
resp, err := auth.remote.RoleAdd(ctx, &pb.AuthRoleAddRequest{Name: name})
return (*AuthRoleAddResponse)(resp), err
resp, err := auth.remote.RoleAdd(ctx, &pb.AuthRoleAddRequest{Name: name}, grpc.FailFast(false))
return (*AuthRoleAddResponse)(resp), toErr(ctx, err)
}
func (auth *auth) RoleGrantPermission(ctx context.Context, name string, key, rangeEnd string, permType PermissionType) (*AuthRoleGrantPermissionResponse, error) {
perm := &authpb.Permission{
Key: []byte(key),
RangeEnd: []byte(rangeEnd),
PermType: authpb.Permission_Type(permType),
}
resp, err := auth.remote.RoleGrantPermission(ctx, &pb.AuthRoleGrantPermissionRequest{Name: name, Perm: perm}, grpc.FailFast(false))
return (*AuthRoleGrantPermissionResponse)(resp), toErr(ctx, err)
}
func (auth *auth) RoleGet(ctx context.Context, role string) (*AuthRoleGetResponse, error) {
resp, err := auth.remote.RoleGet(ctx, &pb.AuthRoleGetRequest{Role: role}, grpc.FailFast(false))
return (*AuthRoleGetResponse)(resp), toErr(ctx, err)
}
func (auth *auth) RoleList(ctx context.Context) (*AuthRoleListResponse, error) {
resp, err := auth.remote.RoleList(ctx, &pb.AuthRoleListRequest{}, grpc.FailFast(false))
return (*AuthRoleListResponse)(resp), toErr(ctx, err)
}
func (auth *auth) RoleRevokePermission(ctx context.Context, role string, key, rangeEnd string) (*AuthRoleRevokePermissionResponse, error) {
resp, err := auth.remote.RoleRevokePermission(ctx, &pb.AuthRoleRevokePermissionRequest{Role: role, Key: key, RangeEnd: rangeEnd}, grpc.FailFast(false))
return (*AuthRoleRevokePermissionResponse)(resp), toErr(ctx, err)
}
func (auth *auth) RoleDelete(ctx context.Context, role string) (*AuthRoleDeleteResponse, error) {
resp, err := auth.remote.RoleDelete(ctx, &pb.AuthRoleDeleteRequest{Role: role}, grpc.FailFast(false))
return (*AuthRoleDeleteResponse)(resp), toErr(ctx, err)
}
func StrToPermissionType(s string) (PermissionType, error) {
val, ok := authpb.Permission_Type_value[strings.ToUpper(s)]
if ok {
return PermissionType(val), nil
}
return PermissionType(-1), fmt.Errorf("invalid permission type: %s", s)
}
type authenticator struct {
conn *grpc.ClientConn // conn in-use
remote pb.AuthClient
}
func (auth *authenticator) authenticate(ctx context.Context, name string, password string) (*AuthenticateResponse, error) {
resp, err := auth.remote.Authenticate(ctx, &pb.AuthenticateRequest{Name: name, Password: password}, grpc.FailFast(false))
return (*AuthenticateResponse)(resp), toErr(ctx, err)
}
func (auth *authenticator) close() {
auth.conn.Close()
}
func newAuthenticator(endpoint string, opts []grpc.DialOption) (*authenticator, error) {
conn, err := grpc.Dial(endpoint, opts...)
if err != nil {
return nil, err
}
return &authenticator{
conn: conn,
remote: pb.NewAuthClient(conn),
}, nil
}
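A rough usage sketch of the new Auth API, assuming a locally reachable endpoint; the user, role, and key range names are invented for the example. Note that, per the server-side store above, AuthEnable is rejected unless a root user holding the root role already exists.

```go
package main

import (
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

func main() {
	// The endpoint below is an assumption for the example.
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"127.0.0.1:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	ctx := context.Background()
	// AuthEnable fails server-side without a "root" user that holds the
	// "root" role, so set those up first.
	if _, err = cli.UserAdd(ctx, "root", "rootpw"); err != nil {
		log.Fatal(err)
	}
	if _, err = cli.RoleAdd(ctx, "root"); err != nil {
		log.Fatal(err)
	}
	if _, err = cli.UserGrantRole(ctx, "root", "root"); err != nil {
		log.Fatal(err)
	}
	// Grant a read-only key range ["foo", "fop") to a separate role.
	if _, err = cli.RoleAdd(ctx, "reader"); err != nil {
		log.Fatal(err)
	}
	if _, err = cli.RoleGrantPermission(ctx, "reader", "foo", "fop",
		clientv3.PermissionType(clientv3.PermRead)); err != nil {
		log.Fatal(err)
	}
	if _, err = cli.AuthEnable(ctx); err != nil {
		log.Fatal(err)
	}
}
```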

64
vendor/github.com/coreos/etcd/clientv3/balancer.go generated vendored Normal file
View File

@ -0,0 +1,64 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clientv3
import (
"net/url"
"strings"
"sync/atomic"
"golang.org/x/net/context"
"google.golang.org/grpc"
)
// simpleBalancer does the bare minimum to expose multiple eps
// to the grpc reconnection code path
type simpleBalancer struct {
// eps are the client's endpoints stripped of any URL scheme
eps []string
ch chan []grpc.Address
numGets uint32
}
func newSimpleBalancer(eps []string) grpc.Balancer {
ch := make(chan []grpc.Address, 1)
addrs := make([]grpc.Address, len(eps))
for i := range eps {
addrs[i].Addr = getHost(eps[i])
}
ch <- addrs
return &simpleBalancer{eps: eps, ch: ch}
}
func (b *simpleBalancer) Start(target string) error { return nil }
func (b *simpleBalancer) Up(addr grpc.Address) func(error) { return func(error) {} }
func (b *simpleBalancer) Get(ctx context.Context, opts grpc.BalancerGetOptions) (grpc.Address, func(), error) {
v := atomic.AddUint32(&b.numGets, 1)
ep := b.eps[v%uint32(len(b.eps))]
return grpc.Address{Addr: getHost(ep)}, func() {}, nil
}
func (b *simpleBalancer) Notify() <-chan []grpc.Address { return b.ch }
func (b *simpleBalancer) Close() error {
close(b.ch)
return nil
}
func getHost(ep string) string {
url, uerr := url.Parse(ep)
if uerr != nil || !strings.Contains(ep, "://") {
return ep
}
return url.Host
}

View File

@ -1,4 +1,4 @@
// Copyright 2016 CoreOS, Inc.
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -15,18 +15,22 @@
package clientv3
import (
"crypto/tls"
"errors"
"fmt"
"io/ioutil"
"log"
"net"
"net/url"
"strings"
"sync"
"time"
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/metadata"
)
var (
@ -42,21 +46,21 @@ type Client struct {
Auth
Maintenance
conn *grpc.ClientConn
cfg Config
creds *credentials.TransportAuthenticator
mu sync.RWMutex // protects connection selection and error list
errors []error // errors passed to retryConnection
conn *grpc.ClientConn
cfg Config
creds *credentials.TransportCredentials
ctx context.Context
cancel context.CancelFunc
// Username is a username for authentication
Username string
// Password is a password for authentication
Password string
}
// New creates a new etcdv3 client from a given configuration.
func New(cfg Config) (*Client, error) {
if cfg.RetryDialer == nil {
cfg.RetryDialer = dialEndpointList
}
if len(cfg.Endpoints) == 0 {
return nil, ErrNoAvailableEndpoints
}
@ -80,17 +84,8 @@ func NewFromConfigFile(path string) (*Client, error) {
// Close shuts down the client's etcd connections.
func (c *Client) Close() error {
c.mu.Lock()
if c.cancel == nil {
c.mu.Unlock()
return nil
}
c.cancel()
c.cancel = nil
c.mu.Unlock()
c.Watcher.Close()
c.Lease.Close()
return c.conn.Close()
return toErr(c.ctx, c.conn.Close())
}
// Ctx is a context for "out of band" messages (e.g., for sending
@ -101,72 +96,157 @@ func (c *Client) Ctx() context.Context { return c.ctx }
// Endpoints lists the registered endpoints for the client.
func (c *Client) Endpoints() []string { return c.cfg.Endpoints }
// Errors returns all errors that have been observed since called last.
func (c *Client) Errors() (errs []error) {
c.mu.Lock()
defer c.mu.Unlock()
errs = c.errors
c.errors = nil
return errs
type authTokenCredential struct {
token string
}
// Dial establishes a connection for a given endpoint using the client's config
func (c *Client) Dial(endpoint string) (*grpc.ClientConn, error) {
func (cred authTokenCredential) RequireTransportSecurity() bool {
return false
}
func (cred authTokenCredential) GetRequestMetadata(ctx context.Context, s ...string) (map[string]string, error) {
return map[string]string{
"token": cred.token,
}, nil
}
func (c *Client) dialTarget(endpoint string) (proto string, host string, creds *credentials.TransportCredentials) {
proto = "tcp"
host = endpoint
creds = c.creds
url, uerr := url.Parse(endpoint)
if uerr != nil || !strings.Contains(endpoint, "://") {
return
}
// strip scheme:// prefix since grpc dials by host
host = url.Host
switch url.Scheme {
case "unix":
proto = "unix"
case "http":
creds = nil
case "https":
if creds != nil {
break
}
tlsconfig := &tls.Config{}
emptyCreds := credentials.NewTLS(tlsconfig)
creds = &emptyCreds
default:
return "", "", nil
}
return
}
// dialSetupOpts gives the dial opts prior to any authentication
func (c *Client) dialSetupOpts(endpoint string, dopts ...grpc.DialOption) []grpc.DialOption {
opts := []grpc.DialOption{
grpc.WithBlock(),
grpc.WithTimeout(c.cfg.DialTimeout),
}
if c.creds != nil {
opts = append(opts, grpc.WithTransportCredentials(*c.creds))
} else {
opts = append(opts, grpc.WithInsecure())
opts = append(opts, dopts...)
// grpc issues TLS cert checks using the string passed into dial so
// that string must be the host. To recover the full scheme://host URL,
// have a map from hosts to the original endpoint.
host2ep := make(map[string]string)
for i := range c.cfg.Endpoints {
_, host, _ := c.dialTarget(c.cfg.Endpoints[i])
host2ep[host] = c.cfg.Endpoints[i]
}
proto := "tcp"
if url, uerr := url.Parse(endpoint); uerr == nil && url.Scheme == "unix" {
proto = "unix"
// strip unix:// prefix so certs work
endpoint = url.Host
}
f := func(a string, t time.Duration) (net.Conn, error) {
f := func(host string, t time.Duration) (net.Conn, error) {
proto, host, _ := c.dialTarget(host2ep[host])
if proto == "" {
return nil, fmt.Errorf("unknown scheme for %q", host)
}
select {
case <-c.ctx.Done():
return nil, c.ctx.Err()
default:
}
return net.DialTimeout(proto, a, t)
return net.DialTimeout(proto, host, t)
}
opts = append(opts, grpc.WithDialer(f))
conn, err := grpc.Dial(endpoint, opts...)
_, _, creds := c.dialTarget(endpoint)
if creds != nil {
opts = append(opts, grpc.WithTransportCredentials(*creds))
} else {
opts = append(opts, grpc.WithInsecure())
}
return opts
}
// Dial connects to a single endpoint using the client's config.
func (c *Client) Dial(endpoint string) (*grpc.ClientConn, error) {
return c.dial(endpoint)
}
func (c *Client) dial(endpoint string, dopts ...grpc.DialOption) (*grpc.ClientConn, error) {
opts := c.dialSetupOpts(endpoint, dopts...)
host := getHost(endpoint)
if c.Username != "" && c.Password != "" {
// use dial options without dopts to avoid reusing the client balancer
auth, err := newAuthenticator(host, c.dialSetupOpts(endpoint))
if err != nil {
return nil, err
}
defer auth.close()
resp, err := auth.authenticate(c.ctx, c.Username, c.Password)
if err != nil {
return nil, err
}
opts = append(opts, grpc.WithPerRPCCredentials(authTokenCredential{token: resp.Token}))
}
conn, err := grpc.Dial(host, opts...)
if err != nil {
return nil, err
}
return conn, nil
}
// WithRequireLeader requires client requests to only succeed
// when the cluster has a leader.
func WithRequireLeader(ctx context.Context) context.Context {
md := metadata.Pairs(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader)
return metadata.NewContext(ctx, md)
}
func newClient(cfg *Config) (*Client, error) {
if cfg == nil {
cfg = &Config{RetryDialer: dialEndpointList}
cfg = &Config{}
}
var creds *credentials.TransportAuthenticator
var creds *credentials.TransportCredentials
if cfg.TLS != nil {
c := credentials.NewTLS(cfg.TLS)
creds = &c
}
// use a temporary skeleton client to bootstrap first connection
ctx, cancel := context.WithCancel(context.TODO())
conn, err := cfg.RetryDialer(&Client{cfg: *cfg, creds: creds, ctx: ctx})
if err != nil {
return nil, err
}
client := &Client{
conn: conn,
conn: nil,
cfg: *cfg,
creds: creds,
ctx: ctx,
cancel: cancel,
}
if cfg.Username != "" && cfg.Password != "" {
client.Username = cfg.Username
client.Password = cfg.Password
}
b := newSimpleBalancer(cfg.Endpoints)
conn, err := client.dial(cfg.Endpoints[0], grpc.WithBalancer(b))
if err != nil {
return nil, err
}
client.conn = conn
client.Cluster = NewCluster(client)
client.KV = NewKV(client)
client.Lease = NewLease(client)
@ -184,60 +264,35 @@ func newClient(cfg *Config) (*Client, error) {
}
// ActiveConnection returns the current in-use connection
func (c *Client) ActiveConnection() *grpc.ClientConn {
c.mu.RLock()
defer c.mu.RUnlock()
return c.conn
}
func (c *Client) ActiveConnection() *grpc.ClientConn { return c.conn }
// retryConnection establishes a new connection
func (c *Client) retryConnection(oldConn *grpc.ClientConn, err error) (*grpc.ClientConn, error) {
c.mu.Lock()
defer c.mu.Unlock()
if err != nil {
c.errors = append(c.errors, err)
}
if c.cancel == nil {
return nil, c.ctx.Err()
}
if oldConn != c.conn {
// conn has already been updated
return c.conn, nil
}
oldConn.Close()
if st, _ := oldConn.State(); st != grpc.Shutdown {
// wait for shutdown so grpc doesn't leak sleeping goroutines
oldConn.WaitForStateChange(c.ctx, st)
}
conn, dialErr := c.cfg.RetryDialer(c)
if dialErr != nil {
c.errors = append(c.errors, dialErr)
return nil, dialErr
}
c.conn = conn
return c.conn, nil
}
// dialEndpointList attempts to connect to each endpoint in order until a
// connection is established.
func dialEndpointList(c *Client) (*grpc.ClientConn, error) {
var err error
for _, ep := range c.Endpoints() {
conn, curErr := c.Dial(ep)
if curErr != nil {
err = curErr
} else {
return conn, nil
}
}
return nil, err
}
// isHalted returns true if the given error and context indicate no forward
// isHaltErr returns true if the given error and context indicate no forward
// progress can be made, even after reconnecting.
func isHalted(ctx context.Context, err error) bool {
isRPCError := strings.HasPrefix(grpc.ErrorDesc(err), "etcdserver: ")
return isRPCError || ctx.Err() != nil
func isHaltErr(ctx context.Context, err error) bool {
if ctx != nil && ctx.Err() != nil {
return true
}
if err == nil {
return false
}
eErr := rpctypes.Error(err)
if _, ok := eErr.(rpctypes.EtcdError); ok {
return eErr != rpctypes.ErrStopped && eErr != rpctypes.ErrNoLeader
}
// treat etcdserver errors not recognized by the client as halting
return strings.Contains(err.Error(), grpc.ErrClientConnClosing.Error()) ||
strings.Contains(err.Error(), "etcdserver:")
}
func toErr(ctx context.Context, err error) error {
if err == nil {
return nil
}
err = rpctypes.Error(err)
if ctx.Err() != nil && strings.Contains(err.Error(), "context") {
err = ctx.Err()
} else if strings.Contains(err.Error(), grpc.ErrClientConnClosing.Error()) {
err = grpc.ErrClientConnClosing
}
return err
}
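Once auth is enabled, a client dialed with Username/Password obtains a simple token during dial and attaches it to every RPC as a per-RPC credential. A minimal sketch, with the endpoint and credentials as assumptions:

```go
package main

import (
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
)

func main() {
	// Username/Password in the config trigger the authenticate-on-dial path
	// added above; the resulting token rides along as a per-RPC credential.
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"127.0.0.1:2379"},
		DialTimeout: 5 * time.Second,
		Username:    "root",
		Password:    "rootpw",
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()
	log.Println("authenticated client ready")
}
```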

View File

@ -1,4 +1,4 @@
// Copyright 2016 CoreOS, Inc.
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -15,8 +15,6 @@
package clientv3
import (
"sync"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"golang.org/x/net/context"
"google.golang.org/grpc"
@ -34,9 +32,6 @@ type Cluster interface {
// MemberList lists the current cluster membership.
MemberList(ctx context.Context) (*MemberListResponse, error)
// MemberLeader returns the current leader member.
MemberLeader(ctx context.Context) (*Member, error)
// MemberAdd adds a new member into the cluster.
MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error)
@ -48,70 +43,47 @@ type Cluster interface {
}
type cluster struct {
c *Client
mu sync.Mutex
conn *grpc.ClientConn // conn in-use
remote pb.ClusterClient
}
func NewCluster(c *Client) Cluster {
conn := c.ActiveConnection()
return &cluster{
c: c,
conn: conn,
remote: pb.NewClusterClient(conn),
}
return &cluster{remote: pb.NewClusterClient(c.conn)}
}
func (c *cluster) MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) {
r := &pb.MemberAddRequest{PeerURLs: peerAddrs}
resp, err := c.getRemote().MemberAdd(ctx, r)
resp, err := c.remote.MemberAdd(ctx, r, grpc.FailFast(false))
if err == nil {
return (*MemberAddResponse)(resp), nil
}
if isHalted(ctx, err) {
return nil, err
if isHaltErr(ctx, err) {
return nil, toErr(ctx, err)
}
go c.switchRemote(err)
return nil, err
return nil, toErr(ctx, err)
}
func (c *cluster) MemberRemove(ctx context.Context, id uint64) (*MemberRemoveResponse, error) {
r := &pb.MemberRemoveRequest{ID: id}
resp, err := c.getRemote().MemberRemove(ctx, r)
resp, err := c.remote.MemberRemove(ctx, r, grpc.FailFast(false))
if err == nil {
return (*MemberRemoveResponse)(resp), nil
}
if isHalted(ctx, err) {
return nil, err
if isHaltErr(ctx, err) {
return nil, toErr(ctx, err)
}
go c.switchRemote(err)
return nil, err
return nil, toErr(ctx, err)
}
func (c *cluster) MemberUpdate(ctx context.Context, id uint64, peerAddrs []string) (*MemberUpdateResponse, error) {
// it is safe to retry on update.
for {
r := &pb.MemberUpdateRequest{ID: id, PeerURLs: peerAddrs}
resp, err := c.getRemote().MemberUpdate(ctx, r)
resp, err := c.remote.MemberUpdate(ctx, r, grpc.FailFast(false))
if err == nil {
return (*MemberUpdateResponse)(resp), nil
}
if isHalted(ctx, err) {
return nil, err
}
err = c.switchRemote(err)
if err != nil {
return nil, err
if isHaltErr(ctx, err) {
return nil, toErr(ctx, err)
}
}
}
@ -119,52 +91,12 @@ func (c *cluster) MemberUpdate(ctx context.Context, id uint64, peerAddrs []strin
func (c *cluster) MemberList(ctx context.Context) (*MemberListResponse, error) {
// it is safe to retry on list.
for {
resp, err := c.getRemote().MemberList(ctx, &pb.MemberListRequest{})
resp, err := c.remote.MemberList(ctx, &pb.MemberListRequest{}, grpc.FailFast(false))
if err == nil {
return (*MemberListResponse)(resp), nil
}
if isHalted(ctx, err) {
return nil, err
}
err = c.switchRemote(err)
if err != nil {
return nil, err
if isHaltErr(ctx, err) {
return nil, toErr(ctx, err)
}
}
}
func (c *cluster) MemberLeader(ctx context.Context) (*Member, error) {
resp, err := c.MemberList(ctx)
if err != nil {
return nil, err
}
for _, m := range resp.Members {
if m.IsLeader {
return (*Member)(m), nil
}
}
return nil, nil
}
func (c *cluster) getRemote() pb.ClusterClient {
c.mu.Lock()
defer c.mu.Unlock()
return c.remote
}
func (c *cluster) switchRemote(prevErr error) error {
newConn, err := c.c.retryConnection(c.conn, prevErr)
if err != nil {
return err
}
c.mu.Lock()
defer c.mu.Unlock()
c.conn = newConn
c.remote = pb.NewClusterClient(c.conn)
return nil
}
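
For orientation, a small usage sketch of the Cluster API after this change (the endpoint is a placeholder). Note that MemberLeader was dropped from the interface; leader discovery now typically goes through the per-endpoint Status call shown later in maintenance.go:

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// MemberList retries internally and is safe to call repeatedly.
	resp, err := cli.MemberList(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range resp.Members {
		fmt.Println(m.ID, m.Name, m.ClientURLs)
	}
}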

53
vendor/github.com/coreos/etcd/clientv3/compact_op.go generated vendored Normal file
View File

@ -0,0 +1,53 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clientv3
import (
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
)
// CompactOp represents a compact operation.
type CompactOp struct {
revision int64
physical bool
}
// CompactOption configures compact operation.
type CompactOption func(*CompactOp)
func (op *CompactOp) applyCompactOpts(opts []CompactOption) {
for _, opt := range opts {
opt(op)
}
}
// OpCompact wraps slice CompactOption to create a CompactOp.
func OpCompact(rev int64, opts ...CompactOption) CompactOp {
ret := CompactOp{revision: rev}
ret.applyCompactOpts(opts)
return ret
}
func (op CompactOp) toRequest() *pb.CompactionRequest {
return &pb.CompactionRequest{Revision: op.revision, Physical: op.physical}
}
// WithCompactPhysical makes compact RPC call wait until
// the compaction is physically applied to the local database
// such that compacted entries are totally removed from the
// backend database.
func WithCompactPhysical() CompactOption {
return func(op *CompactOp) { op.physical = true }
}
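
A brief sketch of how a caller might use OpCompact through the new KV.Compact signature shown later in this diff (revision 100 and the endpoint are placeholders):

package main

import (
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// Compact history up to revision 100; WithCompactPhysical blocks until
	// the compaction is applied to the backend database.
	if _, err := cli.Compact(context.Background(), 100, clientv3.WithCompactPhysical()); err != nil {
		log.Fatal(err)
	}
}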

View File

@ -1,4 +1,4 @@
// Copyright 2016 CoreOS, Inc.
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.

View File

@ -1,4 +1,4 @@
// Copyright 2016 CoreOS, Inc.
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -22,19 +22,12 @@ import (
"github.com/coreos/etcd/pkg/tlsutil"
"github.com/ghodss/yaml"
"google.golang.org/grpc"
)
// EndpointDialer is a policy for choosing which endpoint to dial next
type EndpointDialer func(*Client) (*grpc.ClientConn, error)
type Config struct {
// Endpoints is a list of URLs
Endpoints []string
// RetryDialer chooses the next endpoint to use
RetryDialer EndpointDialer
// DialTimeout is the timeout for failing to establish a connection.
DialTimeout time.Duration
@ -43,9 +36,15 @@ type Config struct {
// Logger is the logger used by client library.
Logger Logger
// Username is a username for authentication
Username string
// Password is a password for authentication
Password string
}
type YamlConfig struct {
type yamlConfig struct {
Endpoints []string `json:"endpoints"`
DialTimeout time.Duration `json:"dial-timeout"`
InsecureTransport bool `json:"insecure-transport"`
@ -61,7 +60,7 @@ func configFromFile(fpath string) (*Config, error) {
return nil, err
}
yc := &YamlConfig{}
yc := &yamlConfig{}
err = yaml.Unmarshal(b, yc)
if err != nil {

View File

@ -1,4 +1,4 @@
// Copyright 2016 CoreOS, Inc.
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.

View File

@ -1,4 +1,4 @@
// Copyright 2015 CoreOS, Inc.
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -15,18 +15,17 @@
package clientv3
import (
"sync"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"golang.org/x/net/context"
"google.golang.org/grpc"
)
type (
PutResponse pb.PutResponse
GetResponse pb.RangeResponse
DeleteResponse pb.DeleteRangeResponse
TxnResponse pb.TxnResponse
CompactResponse pb.CompactionResponse
PutResponse pb.PutResponse
GetResponse pb.RangeResponse
DeleteResponse pb.DeleteRangeResponse
TxnResponse pb.TxnResponse
)
type KV interface {
@ -50,7 +49,7 @@ type KV interface {
Delete(ctx context.Context, key string, opts ...OpOption) (*DeleteResponse, error)
// Compact compacts etcd KV history before the given rev.
Compact(ctx context.Context, rev int64) error
Compact(ctx context.Context, rev int64, opts ...CompactOption) (*CompactResponse, error)
// Do applies a single Op on KV without a transaction.
// Do is useful when declaring operations to be issued at a later time
@ -61,7 +60,7 @@ type KV interface {
// Do is useful when creating arbitrary operations to be issued at a
// later time; the user can range over the operations, calling Do to
// execute them. Get/Put/Delete, on the other hand, are best suited
// for when the operation should be issued at the time of declaration.
// for when the operation should be issued at the time of declaration.
Do(ctx context.Context, op Op) (OpResponse, error)
// Txn creates a transaction.
@ -74,54 +73,39 @@ type OpResponse struct {
del *DeleteResponse
}
type kv struct {
c *Client
func (op OpResponse) Put() *PutResponse { return op.put }
func (op OpResponse) Get() *GetResponse { return op.get }
func (op OpResponse) Del() *DeleteResponse { return op.del }
mu sync.Mutex // guards all fields
conn *grpc.ClientConn // conn in-use
type kv struct {
remote pb.KVClient
}
func NewKV(c *Client) KV {
conn := c.ActiveConnection()
remote := pb.NewKVClient(conn)
return &kv{
conn: c.ActiveConnection(),
remote: remote,
c: c,
}
return &kv{remote: pb.NewKVClient(c.conn)}
}
func (kv *kv) Put(ctx context.Context, key, val string, opts ...OpOption) (*PutResponse, error) {
r, err := kv.Do(ctx, OpPut(key, val, opts...))
return r.put, err
return r.put, toErr(ctx, err)
}
func (kv *kv) Get(ctx context.Context, key string, opts ...OpOption) (*GetResponse, error) {
r, err := kv.Do(ctx, OpGet(key, opts...))
return r.get, err
return r.get, toErr(ctx, err)
}
func (kv *kv) Delete(ctx context.Context, key string, opts ...OpOption) (*DeleteResponse, error) {
r, err := kv.Do(ctx, OpDelete(key, opts...))
return r.del, err
return r.del, toErr(ctx, err)
}
func (kv *kv) Compact(ctx context.Context, rev int64) error {
r := &pb.CompactionRequest{Revision: rev}
_, err := kv.getRemote().Compact(ctx, r)
if err == nil {
return nil
func (kv *kv) Compact(ctx context.Context, rev int64, opts ...CompactOption) (*CompactResponse, error) {
resp, err := kv.remote.Compact(ctx, OpCompact(rev, opts...).toRequest(), grpc.FailFast(false))
if err != nil {
return nil, toErr(ctx, err)
}
if isHalted(ctx, err) {
return err
}
go kv.switchRemote(err)
return err
return (*CompactResponse)(resp), err
}
func (kv *kv) Txn(ctx context.Context) Txn {
@ -133,75 +117,60 @@ func (kv *kv) Txn(ctx context.Context) Txn {
func (kv *kv) Do(ctx context.Context, op Op) (OpResponse, error) {
for {
var err error
switch op.t {
// TODO: handle other ops
case tRange:
var resp *pb.RangeResponse
r := &pb.RangeRequest{Key: op.key, RangeEnd: op.end, Limit: op.limit, Revision: op.rev, Serializable: op.serializable}
if op.sort != nil {
r.SortOrder = pb.RangeRequest_SortOrder(op.sort.Order)
r.SortTarget = pb.RangeRequest_SortTarget(op.sort.Target)
}
resp, err = kv.getRemote().Range(ctx, r)
if err == nil {
return OpResponse{get: (*GetResponse)(resp)}, nil
}
case tPut:
var resp *pb.PutResponse
r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID)}
resp, err = kv.getRemote().Put(ctx, r)
if err == nil {
return OpResponse{put: (*PutResponse)(resp)}, nil
}
case tDeleteRange:
var resp *pb.DeleteRangeResponse
r := &pb.DeleteRangeRequest{Key: op.key, RangeEnd: op.end}
resp, err = kv.getRemote().DeleteRange(ctx, r)
if err == nil {
return OpResponse{del: (*DeleteResponse)(resp)}, nil
}
default:
panic("Unknown op")
resp, err := kv.do(ctx, op)
if err == nil {
return resp, nil
}
if isHalted(ctx, err) {
return OpResponse{}, err
if isHaltErr(ctx, err) {
return resp, toErr(ctx, err)
}
// do not retry on modifications
if op.isWrite() {
go kv.switchRemote(err)
return OpResponse{}, err
}
if nerr := kv.switchRemote(err); nerr != nil {
return OpResponse{}, nerr
return resp, toErr(ctx, err)
}
}
}
func (kv *kv) switchRemote(prevErr error) error {
// Usually it's a bad idea to lock on network i/o but here it's OK
// since the link is down and new requests can't be processed anyway.
// Likewise, if connecting stalls, closing the Client can break the
// lock via context cancelation.
kv.mu.Lock()
defer kv.mu.Unlock()
func (kv *kv) do(ctx context.Context, op Op) (OpResponse, error) {
var err error
switch op.t {
// TODO: handle other ops
case tRange:
var resp *pb.RangeResponse
r := &pb.RangeRequest{
Key: op.key,
RangeEnd: op.end,
Limit: op.limit,
Revision: op.rev,
Serializable: op.serializable,
KeysOnly: op.keysOnly,
CountOnly: op.countOnly,
}
if op.sort != nil {
r.SortOrder = pb.RangeRequest_SortOrder(op.sort.Order)
r.SortTarget = pb.RangeRequest_SortTarget(op.sort.Target)
}
newConn, err := kv.c.retryConnection(kv.conn, prevErr)
if err != nil {
return err
resp, err = kv.remote.Range(ctx, r, grpc.FailFast(false))
if err == nil {
return OpResponse{get: (*GetResponse)(resp)}, nil
}
case tPut:
var resp *pb.PutResponse
r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID)}
resp, err = kv.remote.Put(ctx, r, grpc.FailFast(false))
if err == nil {
return OpResponse{put: (*PutResponse)(resp)}, nil
}
case tDeleteRange:
var resp *pb.DeleteRangeResponse
r := &pb.DeleteRangeRequest{Key: op.key, RangeEnd: op.end}
resp, err = kv.remote.DeleteRange(ctx, r, grpc.FailFast(false))
if err == nil {
return OpResponse{del: (*DeleteResponse)(resp)}, nil
}
default:
panic("Unknown op")
}
kv.conn = newConn
kv.remote = pb.NewKVClient(kv.conn)
return nil
}
func (kv *kv) getRemote() pb.KVClient {
kv.mu.Lock()
defer kv.mu.Unlock()
return kv.remote
return OpResponse{}, err
}
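
A hypothetical sketch of driving the KV layer through Do and reading results back with the new OpResponse accessors (keys, values, and the endpoint are placeholders):

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	ctx := context.Background()
	if _, err := cli.Do(ctx, clientv3.OpPut("foo", "bar")); err != nil {
		log.Fatal(err)
	}
	resp, err := cli.Do(ctx, clientv3.OpGet("foo"))
	if err != nil {
		log.Fatal(err)
	}
	for _, kv := range resp.Get().Kvs {
		fmt.Printf("%s -> %s\n", kv.Key, kv.Value)
	}
}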

View File

@ -1,4 +1,4 @@
// Copyright 2016 CoreOS, Inc.
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -18,19 +18,36 @@ import (
"sync"
"time"
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"golang.org/x/net/context"
"google.golang.org/grpc"
)
type (
LeaseGrantResponse pb.LeaseGrantResponse
LeaseRevokeResponse pb.LeaseRevokeResponse
LeaseKeepAliveResponse pb.LeaseKeepAliveResponse
LeaseID int64
LeaseRevokeResponse pb.LeaseRevokeResponse
LeaseID int64
)
// LeaseGrantResponse is used to convert the protobuf grant response.
type LeaseGrantResponse struct {
*pb.ResponseHeader
ID LeaseID
TTL int64
Error string
}
// LeaseKeepAliveResponse is used to convert the protobuf keepalive response.
type LeaseKeepAliveResponse struct {
*pb.ResponseHeader
ID LeaseID
TTL int64
}
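
A minimal sketch of the lease flow with these reworked response types: grant a lease, attach a key with WithLease, then refresh once with KeepAliveOnce (the TTL, key, and endpoint are placeholders). As the change below shows, KeepAliveOnce now surfaces rpctypes.ErrLeaseNotFound when the server reports a zero TTL:

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	ctx := context.Background()
	grant, err := cli.Grant(ctx, 10) // 10 second TTL
	if err != nil {
		log.Fatal(err)
	}
	if _, err := cli.Put(ctx, "foo", "bar", clientv3.WithLease(grant.ID)); err != nil {
		log.Fatal(err)
	}
	ka, err := cli.KeepAliveOnce(ctx, grant.ID)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("lease %x refreshed, TTL=%d\n", ka.ID, ka.TTL)
}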
const (
// defaultTTL is the assumed lease TTL used for the first keepalive
// deadline before the actual TTL is known to the client.
defaultTTL = 5 * time.Second
// a small buffer to store unsent lease responses.
leaseResponseChSize = 16
// NoLease is a lease ID for the absence of a lease.
@ -57,10 +74,7 @@ type Lease interface {
}
type lessor struct {
c *Client
mu sync.Mutex // guards all fields
conn *grpc.ClientConn // conn in-use
mu sync.Mutex // guards all fields
// donec is closed when recvKeepAliveLoop stops
donec chan struct{}
@ -74,32 +88,38 @@ type lessor struct {
stopCancel context.CancelFunc
keepAlives map[LeaseID]*keepAlive
// firstKeepAliveTimeout is the timeout for the first keepalive request
// before the actual TTL is known to the lease client
firstKeepAliveTimeout time.Duration
}
// keepAlive multiplexes a keepalive for a lease over multiple channels
type keepAlive struct {
chs []chan<- *LeaseKeepAliveResponse
ctxs []context.Context
// deadline is the next time to send a keep alive message
// deadline is the time the keep alive channels close if no response
deadline time.Time
// nextKeepAlive is when to send the next keep alive message
nextKeepAlive time.Time
// donec is closed on lease revoke, expiration, or cancel.
donec chan struct{}
}
func NewLease(c *Client) Lease {
l := &lessor{
c: c,
conn: c.ActiveConnection(),
donec: make(chan struct{}),
keepAlives: make(map[LeaseID]*keepAlive),
donec: make(chan struct{}),
keepAlives: make(map[LeaseID]*keepAlive),
remote: pb.NewLeaseClient(c.conn),
firstKeepAliveTimeout: c.cfg.DialTimeout + time.Second,
}
if l.firstKeepAliveTimeout == time.Second {
l.firstKeepAliveTimeout = defaultTTL
}
l.remote = pb.NewLeaseClient(l.conn)
l.stopCtx, l.stopCancel = context.WithCancel(context.Background())
go l.recvKeepAliveLoop()
go l.deadlineLoop()
return l
}
@ -110,14 +130,20 @@ func (l *lessor) Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, err
for {
r := &pb.LeaseGrantRequest{TTL: ttl}
resp, err := l.getRemote().LeaseGrant(cctx, r)
resp, err := l.remote.LeaseGrant(cctx, r, grpc.FailFast(false))
if err == nil {
return (*LeaseGrantResponse)(resp), nil
gresp := &LeaseGrantResponse{
ResponseHeader: resp.GetHeader(),
ID: LeaseID(resp.ID),
TTL: resp.TTL,
Error: resp.Error,
}
return gresp, nil
}
if isHalted(cctx, err) {
return nil, err
if isHaltErr(cctx, err) {
return nil, toErr(ctx, err)
}
if nerr := l.switchRemoteAndStream(err); nerr != nil {
if nerr := l.newStream(); nerr != nil {
return nil, nerr
}
}
@ -130,16 +156,15 @@ func (l *lessor) Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse,
for {
r := &pb.LeaseRevokeRequest{ID: int64(id)}
resp, err := l.getRemote().LeaseRevoke(cctx, r)
resp, err := l.remote.LeaseRevoke(cctx, r, grpc.FailFast(false))
if err == nil {
return (*LeaseRevokeResponse)(resp), nil
}
if isHalted(ctx, err) {
return nil, err
if isHaltErr(ctx, err) {
return nil, toErr(ctx, err)
}
if nerr := l.switchRemoteAndStream(err); nerr != nil {
if nerr := l.newStream(); nerr != nil {
return nil, nerr
}
}
@ -153,10 +178,11 @@ func (l *lessor) KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAl
if !ok {
// create fresh keep alive
ka = &keepAlive{
chs: []chan<- *LeaseKeepAliveResponse{ch},
ctxs: []context.Context{ctx},
deadline: time.Now(),
donec: make(chan struct{}),
chs: []chan<- *LeaseKeepAliveResponse{ch},
ctxs: []context.Context{ctx},
deadline: time.Now().Add(l.firstKeepAliveTimeout),
nextKeepAlive: time.Now(),
donec: make(chan struct{}),
}
l.keepAlives[id] = ka
} else {
@ -179,11 +205,16 @@ func (l *lessor) KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAlive
for {
resp, err := l.keepAliveOnce(cctx, id)
if err == nil {
if resp.TTL == 0 {
err = rpctypes.ErrLeaseNotFound
}
return resp, err
}
if isHaltErr(ctx, err) {
return nil, toErr(ctx, err)
}
nerr := l.switchRemoteAndStream(err)
if nerr != nil {
if nerr := l.newStream(); nerr != nil {
return nil, nerr
}
}
@ -228,26 +259,34 @@ func (l *lessor) keepAliveCtxCloser(id LeaseID, ctx context.Context, donec <-cha
}
func (l *lessor) keepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) {
stream, err := l.getRemote().LeaseKeepAlive(ctx)
cctx, cancel := context.WithCancel(ctx)
defer cancel()
stream, err := l.remote.LeaseKeepAlive(cctx, grpc.FailFast(false))
if err != nil {
return nil, err
return nil, toErr(ctx, err)
}
err = stream.Send(&pb.LeaseKeepAliveRequest{ID: int64(id)})
if err != nil {
return nil, err
return nil, toErr(ctx, err)
}
resp, rerr := stream.Recv()
if rerr != nil {
return nil, rerr
return nil, toErr(ctx, rerr)
}
return (*LeaseKeepAliveResponse)(resp), nil
karesp := &LeaseKeepAliveResponse{
ResponseHeader: resp.GetHeader(),
ID: LeaseID(resp.ID),
TTL: resp.TTL,
}
return karesp, nil
}
func (l *lessor) recvKeepAliveLoop() {
defer func() {
l.stopCancel()
l.mu.Lock()
close(l.donec)
for _, ka := range l.keepAlives {
@ -261,7 +300,7 @@ func (l *lessor) recvKeepAliveLoop() {
for serr == nil {
resp, err := stream.Recv()
if err != nil {
if isHalted(l.stopCtx, err) {
if isHaltErr(l.stopCtx, err) {
return
}
stream, serr = l.resetRecv()
@ -273,7 +312,7 @@ func (l *lessor) recvKeepAliveLoop() {
// resetRecv opens a new lease stream and starts sending LeaseKeepAliveRequests
func (l *lessor) resetRecv() (pb.Lease_LeaseKeepAliveClient, error) {
if err := l.switchRemoteAndStream(nil); err != nil {
if err := l.newStream(); err != nil {
return nil, err
}
stream := l.getKeepAliveStream()
@ -283,39 +322,68 @@ func (l *lessor) resetRecv() (pb.Lease_LeaseKeepAliveClient, error) {
// recvKeepAlive updates a lease based on its LeaseKeepAliveResponse
func (l *lessor) recvKeepAlive(resp *pb.LeaseKeepAliveResponse) {
id := LeaseID(resp.ID)
karesp := &LeaseKeepAliveResponse{
ResponseHeader: resp.GetHeader(),
ID: LeaseID(resp.ID),
TTL: resp.TTL,
}
l.mu.Lock()
defer l.mu.Unlock()
ka, ok := l.keepAlives[id]
ka, ok := l.keepAlives[karesp.ID]
if !ok {
return
}
if resp.TTL <= 0 {
if karesp.TTL <= 0 {
// lease expired; close all keep alive channels
delete(l.keepAlives, id)
delete(l.keepAlives, karesp.ID)
ka.Close()
return
}
// send update to all channels
nextDeadline := time.Now().Add(1 + time.Duration(resp.TTL/3)*time.Second)
nextKeepAlive := time.Now().Add(1 + time.Duration(karesp.TTL/3)*time.Second)
ka.deadline = time.Now().Add(time.Duration(karesp.TTL) * time.Second)
for _, ch := range ka.chs {
select {
case ch <- (*LeaseKeepAliveResponse)(resp):
ka.deadline = nextDeadline
case ch <- karesp:
ka.nextKeepAlive = nextKeepAlive
default:
}
}
}
// deadlineLoop reaps any keep alive channels that have not received a response
// within the lease TTL
func (l *lessor) deadlineLoop() {
for {
select {
case <-time.After(time.Second):
case <-l.donec:
return
}
now := time.Now()
l.mu.Lock()
for id, ka := range l.keepAlives {
if ka.deadline.Before(now) {
// waited too long for response; lease may be expired
ka.Close()
delete(l.keepAlives, id)
}
}
l.mu.Unlock()
}
}
// sendKeepAliveLoop sends LeaseKeepAliveRequests for the lifetime of a lease stream
func (l *lessor) sendKeepAliveLoop(stream pb.Lease_LeaseKeepAliveClient) {
for {
select {
case <-time.After(500 * time.Millisecond):
case <-stream.Context().Done():
return
case <-l.donec:
return
case <-l.stopCtx.Done():
@ -327,7 +395,7 @@ func (l *lessor) sendKeepAliveLoop(stream pb.Lease_LeaseKeepAliveClient) {
now := time.Now()
l.mu.Lock()
for id, ka := range l.keepAlives {
if ka.deadline.Before(now) {
if ka.nextKeepAlive.Before(now) {
tosend = append(tosend, id)
}
}
@ -343,57 +411,18 @@ func (l *lessor) sendKeepAliveLoop(stream pb.Lease_LeaseKeepAliveClient) {
}
}
func (l *lessor) getRemote() pb.LeaseClient {
l.mu.Lock()
defer l.mu.Unlock()
return l.remote
}
func (l *lessor) getKeepAliveStream() pb.Lease_LeaseKeepAliveClient {
l.mu.Lock()
defer l.mu.Unlock()
return l.stream
}
func (l *lessor) switchRemoteAndStream(prevErr error) error {
l.mu.Lock()
conn := l.conn
l.mu.Unlock()
var (
err error
newConn *grpc.ClientConn
)
if prevErr != nil {
conn.Close()
newConn, err = l.c.retryConnection(conn, prevErr)
if err != nil {
return err
}
}
l.mu.Lock()
if newConn != nil {
l.conn = newConn
}
l.remote = pb.NewLeaseClient(l.conn)
l.mu.Unlock()
serr := l.newStream()
if serr != nil {
return serr
}
return nil
}
func (l *lessor) newStream() error {
sctx, cancel := context.WithCancel(l.stopCtx)
stream, err := l.getRemote().LeaseKeepAlive(sctx)
stream, err := l.remote.LeaseKeepAlive(sctx, grpc.FailFast(false))
if err != nil {
cancel()
return err
return toErr(sctx, err)
}
l.mu.Lock()

View File

@ -1,4 +1,4 @@
// Copyright 2016 CoreOS, Inc.
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.

View File

@ -1,4 +1,4 @@
// Copyright 2016 CoreOS, Inc.
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -15,7 +15,7 @@
package clientv3
import (
"sync"
"io"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"golang.org/x/net/context"
@ -45,25 +45,20 @@ type Maintenance interface {
// times with different endpoints.
Defragment(ctx context.Context, endpoint string) (*DefragmentResponse, error)
// Status gets the status of the member.
// Status gets the status of the endpoint.
Status(ctx context.Context, endpoint string) (*StatusResponse, error)
// Snapshot provides a reader for a snapshot of a backend.
Snapshot(ctx context.Context) (io.ReadCloser, error)
}
type maintenance struct {
c *Client
mu sync.Mutex
conn *grpc.ClientConn // conn in-use
c *Client
remote pb.MaintenanceClient
}
func NewMaintenance(c *Client) Maintenance {
conn := c.ActiveConnection()
return &maintenance{
c: c,
conn: conn,
remote: pb.NewMaintenanceClient(conn),
}
return &maintenance{c: c, remote: pb.NewMaintenanceClient(c.conn)}
}
func (m *maintenance) AlarmList(ctx context.Context) (*AlarmResponse, error) {
@ -73,15 +68,12 @@ func (m *maintenance) AlarmList(ctx context.Context) (*AlarmResponse, error) {
Alarm: pb.AlarmType_NONE, // all
}
for {
resp, err := m.getRemote().Alarm(ctx, req)
resp, err := m.remote.Alarm(ctx, req, grpc.FailFast(false))
if err == nil {
return (*AlarmResponse)(resp), nil
}
if isHalted(ctx, err) {
return nil, err
}
if err = m.switchRemote(err); err != nil {
return nil, err
if isHaltErr(ctx, err) {
return nil, toErr(ctx, err)
}
}
}
@ -96,38 +88,36 @@ func (m *maintenance) AlarmDisarm(ctx context.Context, am *AlarmMember) (*AlarmR
if req.MemberID == 0 && req.Alarm == pb.AlarmType_NONE {
ar, err := m.AlarmList(ctx)
if err != nil {
return nil, err
return nil, toErr(ctx, err)
}
ret := AlarmResponse{}
for _, am := range ar.Alarms {
dresp, derr := m.AlarmDisarm(ctx, (*AlarmMember)(am))
if derr != nil {
return nil, derr
return nil, toErr(ctx, derr)
}
ret.Alarms = append(ret.Alarms, dresp.Alarms...)
}
return &ret, nil
}
resp, err := m.getRemote().Alarm(ctx, req)
resp, err := m.remote.Alarm(ctx, req, grpc.FailFast(false))
if err == nil {
return (*AlarmResponse)(resp), nil
}
if !isHalted(ctx, err) {
go m.switchRemote(err)
}
return nil, err
return nil, toErr(ctx, err)
}
func (m *maintenance) Defragment(ctx context.Context, endpoint string) (*DefragmentResponse, error) {
conn, err := m.c.Dial(endpoint)
if err != nil {
return nil, err
return nil, toErr(ctx, err)
}
defer conn.Close()
remote := pb.NewMaintenanceClient(conn)
resp, err := remote.Defragment(ctx, &pb.DefragmentRequest{})
resp, err := remote.Defragment(ctx, &pb.DefragmentRequest{}, grpc.FailFast(false))
if err != nil {
return nil, err
return nil, toErr(ctx, err)
}
return (*DefragmentResponse)(resp), nil
}
@ -135,30 +125,40 @@ func (m *maintenance) Defragment(ctx context.Context, endpoint string) (*Defragm
func (m *maintenance) Status(ctx context.Context, endpoint string) (*StatusResponse, error) {
conn, err := m.c.Dial(endpoint)
if err != nil {
return nil, err
return nil, toErr(ctx, err)
}
defer conn.Close()
remote := pb.NewMaintenanceClient(conn)
resp, err := remote.Status(ctx, &pb.StatusRequest{})
resp, err := remote.Status(ctx, &pb.StatusRequest{}, grpc.FailFast(false))
if err != nil {
return nil, err
return nil, toErr(ctx, err)
}
return (*StatusResponse)(resp), nil
}
func (m *maintenance) getRemote() pb.MaintenanceClient {
m.mu.Lock()
defer m.mu.Unlock()
return m.remote
}
func (m *maintenance) switchRemote(prevErr error) error {
m.mu.Lock()
defer m.mu.Unlock()
newConn, err := m.c.retryConnection(m.conn, prevErr)
func (m *maintenance) Snapshot(ctx context.Context) (io.ReadCloser, error) {
ss, err := m.remote.Snapshot(ctx, &pb.SnapshotRequest{}, grpc.FailFast(false))
if err != nil {
return err
return nil, toErr(ctx, err)
}
m.conn = newConn
m.remote = pb.NewMaintenanceClient(m.conn)
return nil
pr, pw := io.Pipe()
go func() {
for {
resp, err := ss.Recv()
if err != nil {
pw.CloseWithError(err)
return
}
if resp == nil && err == nil {
break
}
if _, werr := pw.Write(resp.Blob); werr != nil {
pw.CloseWithError(werr)
return
}
}
pw.Close()
}()
return pr, nil
}
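
A sketch of consuming the new Snapshot stream by copying it into a local file (the output path and endpoint are placeholders). Because of the io.Pipe above, the returned reader is fed as the gRPC stream delivers blobs, so a plain io.Copy drains it:

package main

import (
	"io"
	"log"
	"os"
	"time"

	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	rc, err := clientv3.NewMaintenance(cli).Snapshot(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	defer rc.Close()

	f, err := os.Create("snapshot.db")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Copying the reader streams the backend snapshot to disk.
	if _, err := io.Copy(f, rc); err != nil {
		log.Fatal(err)
	}
}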

View File

@ -1,4 +1,4 @@
// Copyright 2016 CoreOS, Inc.
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -41,6 +41,8 @@ type Op struct {
limit int64
sort *SortOption
serializable bool
keysOnly bool
countOnly bool
// for range, watch
rev int64
@ -53,21 +55,29 @@ type Op struct {
leaseID LeaseID
}
func (op Op) toRequestUnion() *pb.RequestUnion {
func (op Op) toRequestOp() *pb.RequestOp {
switch op.t {
case tRange:
r := &pb.RangeRequest{Key: op.key, RangeEnd: op.end, Limit: op.limit, Revision: op.rev, Serializable: op.serializable}
r := &pb.RangeRequest{
Key: op.key,
RangeEnd: op.end,
Limit: op.limit,
Revision: op.rev,
Serializable: op.serializable,
KeysOnly: op.keysOnly,
CountOnly: op.countOnly,
}
if op.sort != nil {
r.SortOrder = pb.RangeRequest_SortOrder(op.sort.Order)
r.SortTarget = pb.RangeRequest_SortTarget(op.sort.Target)
}
return &pb.RequestUnion{Request: &pb.RequestUnion_RequestRange{RequestRange: r}}
return &pb.RequestOp{Request: &pb.RequestOp_RequestRange{RequestRange: r}}
case tPut:
r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID)}
return &pb.RequestUnion{Request: &pb.RequestUnion_RequestPut{RequestPut: r}}
return &pb.RequestOp{Request: &pb.RequestOp_RequestPut{RequestPut: r}}
case tDeleteRange:
r := &pb.DeleteRangeRequest{Key: op.key, RangeEnd: op.end}
return &pb.RequestUnion{Request: &pb.RequestUnion_RequestDeleteRange{RequestDeleteRange: r}}
return &pb.RequestOp{Request: &pb.RequestOp_RequestDeleteRange{RequestDeleteRange: r}}
default:
panic("Unknown Op")
}
@ -97,6 +107,8 @@ func OpDelete(key string, opts ...OpOption) Op {
panic("unexpected sort in delete")
case ret.serializable:
panic("unexpected serializable in delete")
case ret.countOnly:
panic("unexpected countOnly in delete")
}
return ret
}
@ -114,7 +126,9 @@ func OpPut(key, val string, opts ...OpOption) Op {
case ret.sort != nil:
panic("unexpected sort in put")
case ret.serializable:
panic("unexpected serializable in delete")
panic("unexpected serializable in put")
case ret.countOnly:
panic("unexpected countOnly in delete")
}
return ret
}
@ -131,6 +145,8 @@ func opWatch(key string, opts ...OpOption) Op {
panic("unexpected sort in watch")
case ret.serializable:
panic("unexpected serializable in watch")
case ret.countOnly:
panic("unexpected countOnly in delete")
}
return ret
}
@ -166,6 +182,12 @@ func WithSort(target SortTarget, order SortOrder) OpOption {
}
}
// GetPrefixRangeEnd gets the range end of the prefix.
// 'Get(foo, WithPrefix())' is equal to 'Get(foo, WithRange(GetPrefixRangeEnd(foo)))'.
func GetPrefixRangeEnd(prefix string) string {
return string(getPrefix([]byte(prefix)))
}
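
A small sketch of the equivalence documented above; the two requests below return the same keys (the "foo" prefix and endpoint are placeholders):

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	ctx := context.Background()
	a, err := cli.Get(ctx, "foo", clientv3.WithPrefix())
	if err != nil {
		log.Fatal(err)
	}
	b, err := cli.Get(ctx, "foo", clientv3.WithRange(clientv3.GetPrefixRangeEnd("foo")))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("same key count:", len(a.Kvs) == len(b.Kvs))
}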
func getPrefix(key []byte) []byte {
end := make([]byte, len(key))
copy(end, key)
@ -198,7 +220,7 @@ func WithRange(endKey string) OpOption {
}
// WithFromKey specifies the range of 'Get' or 'Delete' requests
// to be equal or greater than they key in the argument.
// to be equal or greater than the key in the argument.
func WithFromKey() OpOption { return WithRange("\x00") }
// WithSerializable makes 'Get' request serializable. By default,
@ -208,6 +230,17 @@ func WithSerializable() OpOption {
return func(op *Op) { op.serializable = true }
}
// WithKeysOnly makes the 'Get' request return only the keys and the corresponding
// values will be omitted.
func WithKeysOnly() OpOption {
return func(op *Op) { op.keysOnly = true }
}
// WithCountOnly makes the 'Get' request return only the count of keys.
func WithCountOnly() OpOption {
return func(op *Op) { op.countOnly = true }
}
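
Two hypothetical helpers showing how WithKeysOnly and WithCountOnly change what a Get returns (the package name, function names, and prefix are illustrative, and they assume an already-connected *clientv3.Client):

package etcdexample

import (
	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

// countUnderPrefix returns only the number of keys under prefix; no keys or
// values are transferred.
func countUnderPrefix(ctx context.Context, cli *clientv3.Client, prefix string) (int64, error) {
	resp, err := cli.Get(ctx, prefix, clientv3.WithPrefix(), clientv3.WithCountOnly())
	if err != nil {
		return 0, err
	}
	return resp.Count, nil
}

// keysUnderPrefix returns key names only; the values are omitted by the server.
func keysUnderPrefix(ctx context.Context, cli *clientv3.Client, prefix string) ([]string, error) {
	resp, err := cli.Get(ctx, prefix, clientv3.WithPrefix(), clientv3.WithKeysOnly())
	if err != nil {
		return nil, err
	}
	keys := make([]string, 0, len(resp.Kvs))
	for _, kv := range resp.Kvs {
		keys = append(keys, string(kv.Key))
	}
	return keys, nil
}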
// WithFirstCreate gets the key with the oldest creation revision in the request range.
func WithFirstCreate() []OpOption { return withTop(SortByCreateRevision, SortAscend) }

View File

@ -1,4 +1,4 @@
// Copyright 2016 CoreOS, Inc.
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.

View File

@ -1,4 +1,4 @@
// Copyright 2016 CoreOS, Inc.
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -19,17 +19,20 @@ import (
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"golang.org/x/net/context"
"google.golang.org/grpc"
)
// Txn is the interface that wraps mini-transactions.
//
// Tx.If(
// Compare(Value(k1), ">", v1),
// Compare(Version(k1), "=", 2)
// ).Then(
// OpPut(k2,v2), OpPut(k3,v3)
// ).Else(
// OpPut(k4,v4), OpPut(k5,v5)
// ).Commit()
//
// Tx.If(
// Compare(Value(k1), ">", v1),
// Compare(Version(k1), "=", 2)
// ).Then(
// OpPut(k2,v2), OpPut(k3,v3)
// ).Else(
// OpPut(k4,v4), OpPut(k5,v5)
// ).Commit()
type Txn interface {
// If takes a list of comparison. If all comparisons passed in succeed,
// the operations passed into Then() will be executed. Or the operations
@ -63,8 +66,8 @@ type txn struct {
cmps []*pb.Compare
sus []*pb.RequestUnion
fas []*pb.RequestUnion
sus []*pb.RequestOp
fas []*pb.RequestOp
}
func (txn *txn) If(cs ...Cmp) Txn {
@ -107,7 +110,7 @@ func (txn *txn) Then(ops ...Op) Txn {
for _, op := range ops {
txn.isWrite = txn.isWrite || op.isWrite()
txn.sus = append(txn.sus, op.toRequestUnion())
txn.sus = append(txn.sus, op.toRequestOp())
}
return txn
@ -125,7 +128,7 @@ func (txn *txn) Else(ops ...Op) Txn {
for _, op := range ops {
txn.isWrite = txn.isWrite || op.isWrite()
txn.fas = append(txn.fas, op.toRequestUnion())
txn.fas = append(txn.fas, op.toRequestOp())
}
return txn
@ -134,27 +137,25 @@ func (txn *txn) Else(ops ...Op) Txn {
func (txn *txn) Commit() (*TxnResponse, error) {
txn.mu.Lock()
defer txn.mu.Unlock()
kv := txn.kv
for {
r := &pb.TxnRequest{Compare: txn.cmps, Success: txn.sus, Failure: txn.fas}
resp, err := kv.getRemote().Txn(txn.ctx, r)
resp, err := txn.commit()
if err == nil {
return (*TxnResponse)(resp), nil
return resp, err
}
if isHalted(txn.ctx, err) {
return nil, err
if isHaltErr(txn.ctx, err) {
return nil, toErr(txn.ctx, err)
}
if txn.isWrite {
go kv.switchRemote(err)
return nil, err
}
if nerr := kv.switchRemote(err); nerr != nil {
return nil, nerr
return nil, toErr(txn.ctx, err)
}
}
}
func (txn *txn) commit() (*TxnResponse, error) {
r := &pb.TxnRequest{Compare: txn.cmps, Success: txn.sus, Failure: txn.fas}
resp, err := txn.kv.remote.Txn(txn.ctx, r, grpc.FailFast(false))
if err != nil {
return nil, err
}
return (*TxnResponse)(resp), nil
}
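
A compilable counterpart to the Txn doc comment above, using a create-if-absent pattern (the key, value, and endpoint are placeholders):

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// Put "val" under "key" only if "key" has never been created;
	// otherwise read back the existing value.
	resp, err := cli.Txn(context.Background()).
		If(clientv3.Compare(clientv3.CreateRevision("key"), "=", 0)).
		Then(clientv3.OpPut("key", "val")).
		Else(clientv3.OpGet("key")).
		Commit()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("txn took Then branch:", resp.Succeeded)
}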

View File

@ -1,4 +1,4 @@
// Copyright 2016 CoreOS, Inc.
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -17,20 +17,23 @@ package clientv3
import (
"fmt"
"sync"
"time"
v3rpc "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
storagepb "github.com/coreos/etcd/storage/storagepb"
mvccpb "github.com/coreos/etcd/mvcc/mvccpb"
"golang.org/x/net/context"
"google.golang.org/grpc"
)
const (
EventTypeDelete = storagepb.DELETE
EventTypePut = storagepb.PUT
EventTypeDelete = mvccpb.DELETE
EventTypePut = mvccpb.PUT
closeSendErrTimeout = 250 * time.Millisecond
)
type Event storagepb.Event
type Event mvccpb.Event
type WatchChan <-chan WatchResponse
@ -39,7 +42,7 @@ type Watcher interface {
// through the returned channel.
// If the watch is slow or the required rev is compacted, the watch request
// might be canceled from the server-side and the chan will be closed.
// 'opts' can be: 'WithRev' and/or 'WitchPrefix'.
// 'opts' can be: 'WithRev' and/or 'WithPrefix'.
Watch(ctx context.Context, key string, opts ...OpOption) WatchChan
// Close closes the watcher and cancels all watch requests.
@ -57,6 +60,8 @@ type WatchResponse struct {
// If the watch failed and the stream was about to close, before the channel is closed,
// the channel sends a final response that has Canceled set to true with a non-nil Err().
Canceled bool
closeErr error
}
// IsCreate returns true if the event tells that the key is newly created.
@ -71,10 +76,12 @@ func (e *Event) IsModify() bool {
// Err is the error value if this WatchResponse holds an error.
func (wr *WatchResponse) Err() error {
if wr.CompactRevision != 0 {
switch {
case wr.closeErr != nil:
return v3rpc.Error(wr.closeErr)
case wr.CompactRevision != 0:
return v3rpc.ErrCompacted
}
if wr.Canceled {
case wr.Canceled:
return v3rpc.ErrFutureRev
}
return nil
@ -87,18 +94,28 @@ func (wr *WatchResponse) IsProgressNotify() bool {
// watcher implements the Watcher interface
type watcher struct {
c *Client
conn *grpc.ClientConn
remote pb.WatchClient
// mu protects the grpc streams map
mu sync.RWMutex
// streams holds all the active grpc streams keyed by ctx value.
streams map[string]*watchGrpcStream
}
type watchGrpcStream struct {
owner *watcher
remote pb.WatchClient
// ctx controls internal remote.Watch requests
ctx context.Context
ctx context.Context
// ctxKey is the key used when looking up this stream's context
ctxKey string
cancel context.CancelFunc
// streams holds all active watchers
streams map[int64]*watcherStream
// mu protects the streams map
mu sync.RWMutex
// streams holds all active watchers
streams map[int64]*watcherStream
// reqc sends a watch request from Watch() to the main goroutine
reqc chan *watchRequest
@ -108,8 +125,11 @@ type watcher struct {
stopc chan struct{}
// donec closes to broadcast shutdown
donec chan struct{}
// errc transmits errors from grpc Recv
// errc transmits errors from grpc Recv to the watch stream reconn logic
errc chan error
// the error that closed the watch stream
closeErr error
}
// watchRequest is issued by the subscriber to start a new watcher
@ -126,6 +146,7 @@ type watchRequest struct {
// watcherStream represents a registered watcher
type watcherStream struct {
// initReq is the request that initiated this request
initReq watchRequest
// outc publishes watch responses to subscriber
@ -141,15 +162,30 @@ type watcherStream struct {
}
func NewWatcher(c *Client) Watcher {
ctx, cancel := context.WithCancel(context.Background())
conn := c.ActiveConnection()
return &watcher{
remote: pb.NewWatchClient(c.conn),
streams: make(map[string]*watchGrpcStream),
}
}
w := &watcher{
c: c,
conn: conn,
remote: pb.NewWatchClient(conn),
// never closes
var valCtxCh = make(chan struct{})
var zeroTime = time.Unix(0, 0)
// ctx with only the values; never Done
type valCtx struct{ context.Context }
func (vc *valCtx) Deadline() (time.Time, bool) { return zeroTime, false }
func (vc *valCtx) Done() <-chan struct{} { return valCtxCh }
func (vc *valCtx) Err() error { return nil }
func (w *watcher) newWatcherGrpcStream(inctx context.Context) *watchGrpcStream {
ctx, cancel := context.WithCancel(&valCtx{inctx})
wgs := &watchGrpcStream{
owner: w,
remote: w.remote,
ctx: ctx,
ctxKey: fmt.Sprintf("%v", inctx),
cancel: cancel,
streams: make(map[int64]*watcherStream),
@ -159,8 +195,8 @@ func NewWatcher(c *Client) Watcher {
donec: make(chan struct{}),
errc: make(chan error, 1),
}
go w.run()
return w
go wgs.run()
return wgs
}
// Watch posts a watch request to run() and waits for a new watcher channel
@ -178,13 +214,41 @@ func (w *watcher) Watch(ctx context.Context, key string, opts ...OpOption) Watch
}
ok := false
ctxKey := fmt.Sprintf("%v", ctx)
// find or allocate appropriate grpc watch stream
w.mu.Lock()
if w.streams == nil {
// closed
w.mu.Unlock()
ch := make(chan WatchResponse)
close(ch)
return ch
}
wgs := w.streams[ctxKey]
if wgs == nil {
wgs = w.newWatcherGrpcStream(ctx)
w.streams[ctxKey] = wgs
}
donec := wgs.donec
reqc := wgs.reqc
w.mu.Unlock()
// couldn't create channel; return closed channel
closeCh := make(chan WatchResponse, 1)
// submit request
select {
case w.reqc <- wr:
case reqc <- wr:
ok = true
case <-wr.ctx.Done():
case <-w.donec:
case <-donec:
if wgs.closeErr != nil {
closeCh <- WatchResponse{closeErr: wgs.closeErr}
break
}
// retry; may have dropped stream from no ctxs
return w.Watch(ctx, key, opts...)
}
// receive channel
@ -193,26 +257,44 @@ func (w *watcher) Watch(ctx context.Context, key string, opts ...OpOption) Watch
case ret := <-retc:
return ret
case <-ctx.Done():
case <-w.donec:
case <-donec:
if wgs.closeErr != nil {
closeCh <- WatchResponse{closeErr: wgs.closeErr}
break
}
// retry; may have dropped stream from no ctxs
return w.Watch(ctx, key, opts...)
}
}
// couldn't create channel; return closed channel
ch := make(chan WatchResponse)
close(ch)
return ch
close(closeCh)
return closeCh
}
func (w *watcher) Close() error {
select {
case w.stopc <- struct{}{}:
case <-w.donec:
func (w *watcher) Close() (err error) {
w.mu.Lock()
streams := w.streams
w.streams = nil
w.mu.Unlock()
for _, wgs := range streams {
if werr := wgs.Close(); werr != nil {
err = werr
}
}
<-w.donec
return <-w.errc
return err
}
func (w *watcher) addStream(resp *pb.WatchResponse, pendingReq *watchRequest) {
func (w *watchGrpcStream) Close() (err error) {
close(w.stopc)
<-w.donec
select {
case err = <-w.errc:
default:
}
return toErr(w.ctx, err)
}
func (w *watchGrpcStream) addStream(resp *pb.WatchResponse, pendingReq *watchRequest) {
if pendingReq == nil {
// no pending request; ignore
return
@ -265,27 +347,32 @@ func (w *watcher) addStream(resp *pb.WatchResponse, pendingReq *watchRequest) {
}
// closeStream closes the watcher resources and removes it
func (w *watcher) closeStream(ws *watcherStream) {
func (w *watchGrpcStream) closeStream(ws *watcherStream) {
// cancels request stream; subscriber receives nil channel
close(ws.initReq.retc)
// close subscriber's channel
close(ws.outc)
// shutdown serveStream
close(ws.recvc)
delete(w.streams, ws.id)
}
// run is the root of the goroutines for managing a watcher client
func (w *watcher) run() {
func (w *watchGrpcStream) run() {
var wc pb.Watch_WatchClient
var closeErr error
defer func() {
w.owner.mu.Lock()
w.closeErr = closeErr
if w.owner.streams != nil {
delete(w.owner.streams, w.ctxKey)
}
close(w.donec)
w.owner.mu.Unlock()
w.cancel()
}()
// start a stream with the etcd grpc server
wc, wcerr := w.newWatchClient()
if wcerr != nil {
w.errc <- wcerr
if wc, closeErr = w.newWatchClient(); closeErr != nil {
return
}
@ -314,6 +401,18 @@ func (w *watcher) run() {
curReqC = w.reqc
case pbresp.Canceled:
delete(cancelSet, pbresp.WatchId)
// shutdown serveStream, if any
w.mu.Lock()
if ws, ok := w.streams[pbresp.WatchId]; ok {
close(ws.recvc)
delete(w.streams, ws.id)
}
numStreams := len(w.streams)
w.mu.Unlock()
if numStreams == 0 {
// don't leak watcher streams
return
}
default:
// dispatch to appropriate watch stream
if ok := w.dispatchEvent(pbresp); ok {
@ -334,9 +433,12 @@ func (w *watcher) run() {
}
// watch client failed to recv; spawn another if possible
// TODO report watch client errors from errc?
case <-w.errc:
if wc, wcerr = w.newWatchClient(); wcerr != nil {
w.errc <- wcerr
case err := <-w.errc:
if toErr(w.ctx, err) == v3rpc.ErrNoLeader {
closeErr = err
return
}
if wc, closeErr = w.newWatchClient(); closeErr != nil {
return
}
curReqC = w.reqc
@ -345,7 +447,6 @@ func (w *watcher) run() {
}
cancelSet = make(map[int64]struct{})
case <-w.stopc:
w.errc <- nil
return
}
@ -365,7 +466,7 @@ func (w *watcher) run() {
}
// dispatchEvent sends a WatchResponse to the appropriate watcher stream
func (w *watcher) dispatchEvent(pbresp *pb.WatchResponse) bool {
func (w *watchGrpcStream) dispatchEvent(pbresp *pb.WatchResponse) bool {
w.mu.RLock()
defer w.mu.RUnlock()
ws, ok := w.streams[pbresp.WatchId]
@ -385,7 +486,7 @@ func (w *watcher) dispatchEvent(pbresp *pb.WatchResponse) bool {
}
// serveWatchClient forwards messages from the grpc stream to run()
func (w *watcher) serveWatchClient(wc pb.Watch_WatchClient) {
func (w *watchGrpcStream) serveWatchClient(wc pb.Watch_WatchClient) {
for {
resp, err := wc.Recv()
if err != nil {
@ -404,7 +505,8 @@ func (w *watcher) serveWatchClient(wc pb.Watch_WatchClient) {
}
// serveStream forwards watch responses from run() to the subscriber
func (w *watcher) serveStream(ws *watcherStream) {
func (w *watchGrpcStream) serveStream(ws *watcherStream) {
var closeErr error
emptyWr := &WatchResponse{}
wrs := []*WatchResponse{}
resuming := false
@ -440,7 +542,7 @@ func (w *watcher) serveStream(ws *watcherStream) {
return
}
// resume up to last seen event if disconnected
if resuming {
if resuming && wr.Err() == nil {
resuming = false
// trim events already seen
for i := 0; i < len(wr.Events); i++ {
@ -454,6 +556,7 @@ func (w *watcher) serveStream(ws *watcherStream) {
break
}
}
resuming = false
// TODO don't keep buffering if subscriber stops reading
wrs = append(wrs, wr)
case resumeRev := <-ws.resumec:
@ -468,17 +571,28 @@ func (w *watcher) serveStream(ws *watcherStream) {
}
case <-w.donec:
closing = true
closeErr = w.closeErr
case <-ws.initReq.ctx.Done():
closing = true
}
}
// try to send off close error
if closeErr != nil {
select {
case ws.outc <- WatchResponse{closeErr: w.closeErr}:
case <-w.donec:
case <-time.After(closeSendErrTimeout):
}
}
w.mu.Lock()
w.closeStream(ws)
w.mu.Unlock()
// lazily send cancel message if events on missing id
}
func (w *watcher) newWatchClient() (pb.Watch_WatchClient, error) {
func (w *watchGrpcStream) newWatchClient() (pb.Watch_WatchClient, error) {
ws, rerr := w.resume()
if rerr != nil {
return nil, rerr
@ -488,7 +602,7 @@ func (w *watcher) newWatchClient() (pb.Watch_WatchClient, error) {
}
// resume creates a new WatchClient with all current watchers reestablished
func (w *watcher) resume() (ws pb.Watch_WatchClient, err error) {
func (w *watchGrpcStream) resume() (ws pb.Watch_WatchClient, err error) {
for {
if ws, err = w.openWatchClient(); err != nil {
break
@ -496,31 +610,34 @@ func (w *watcher) resume() (ws pb.Watch_WatchClient, err error) {
break
}
}
return ws, err
return ws, v3rpc.Error(err)
}
// openWatchClient retries opening a watchclient until retryConnection fails
func (w *watcher) openWatchClient() (ws pb.Watch_WatchClient, err error) {
func (w *watchGrpcStream) openWatchClient() (ws pb.Watch_WatchClient, err error) {
for {
if ws, err = w.remote.Watch(w.ctx); ws != nil {
break
} else if isHalted(w.ctx, err) {
select {
case <-w.stopc:
if err == nil {
err = context.Canceled
}
return nil, err
default:
}
newConn, nerr := w.c.retryConnection(w.conn, nil)
if nerr != nil {
return nil, nerr
if ws, err = w.remote.Watch(w.ctx, grpc.FailFast(false)); ws != nil && err == nil {
break
}
if isHaltErr(w.ctx, err) {
return nil, v3rpc.Error(err)
}
w.conn = newConn
w.remote = pb.NewWatchClient(w.conn)
}
return ws, nil
}
// resumeWatchers rebuilds every registered watcher on a new client
func (w *watcher) resumeWatchers(wc pb.Watch_WatchClient) error {
streams := []*watcherStream{}
func (w *watchGrpcStream) resumeWatchers(wc pb.Watch_WatchClient) error {
w.mu.RLock()
streams := make([]*watcherStream, 0, len(w.streams))
for _, ws := range w.streams {
streams = append(streams, ws)
}
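
The consumer-facing watch loop is unchanged by this refactor; internally each distinct context now maps to one watchGrpcStream that multiplexes many watcherStreams. A usage sketch (the key prefix and endpoint are placeholders):

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	wch := cli.Watch(context.Background(), "foo", clientv3.WithPrefix())
	for wresp := range wch {
		if err := wresp.Err(); err != nil {
			// e.g. a compaction error or a halting error delivered via closeErr
			log.Fatal(err)
		}
		for _, ev := range wresp.Events {
			fmt.Printf("%s %q -> %q\n", ev.Type, ev.Kv.Key, ev.Kv.Value)
		}
	}
}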

View File

@ -1,4 +1,4 @@
// Copyright 2016 CoreOS, Inc.
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -19,7 +19,7 @@ import (
"time"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"github.com/coreos/etcd/storage"
"github.com/coreos/etcd/mvcc"
"github.com/coreos/pkg/capnslog"
"github.com/jonboulle/clockwork"
"golang.org/x/net/context"
@ -96,7 +96,7 @@ func (t *Periodic) Run() {
plog.Noticef("Starting auto-compaction at revision %d", rev)
_, err := t.c.Compact(t.ctx, &pb.CompactionRequest{Revision: rev})
if err == nil || err == storage.ErrCompacted {
if err == nil || err == mvcc.ErrCompacted {
t.revs = make([]int64, 0)
last = clock.Now()
plog.Noticef("Finished auto-compaction at revision %d", rev)

16
vendor/github.com/coreos/etcd/compactor/doc.go generated vendored Normal file
View File

@ -0,0 +1,16 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package compactor implements automated policies for compacting etcd's mvcc storage.
package compactor

View File

@ -1,4 +1,4 @@
// Copyright 2015 CoreOS, Inc.
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -20,7 +20,6 @@ import (
"errors"
"fmt"
"math"
"net"
"net/http"
"net/url"
"path"
@ -30,6 +29,7 @@ import (
"time"
"github.com/coreos/etcd/client"
"github.com/coreos/etcd/pkg/transport"
"github.com/coreos/etcd/pkg/types"
"github.com/coreos/pkg/capnslog"
"github.com/jonboulle/clockwork"
@ -124,16 +124,15 @@ func newDiscovery(durl, dproxyurl string, id types.ID) (*discovery, error) {
if err != nil {
return nil, err
}
// TODO: add ResponseHeaderTimeout back when watch on discovery service writes header early
tr, err := transport.NewTransport(transport.TLSInfo{}, 30*time.Second)
if err != nil {
return nil, err
}
tr.Proxy = pf
cfg := client.Config{
Transport: &http.Transport{
Proxy: pf,
Dial: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
}).Dial,
TLSHandshakeTimeout: 10 * time.Second,
// TODO: add ResponseHeaderTimeout back when watch on discovery service writes header early
},
Transport: tr,
Endpoints: []string{u.String()},
}
c, err := client.New(cfg)

View File

@ -1,4 +1,4 @@
// Copyright 2015 CoreOS, Inc.
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.

View File

@ -1,4 +1,4 @@
// Copyright 2015 CoreOS, Inc.
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -13,7 +13,7 @@
// limitations under the License.
// Package error describes errors in etcd project. When any change happens,
// Documentation/errorcode.md needs to be updated correspondingly.
// Documentation/v2/errorcode.md needs to be updated correspondingly.
package error
import (

View File

@ -0,0 +1,85 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package api
import (
"sync"
"github.com/coreos/etcd/version"
"github.com/coreos/go-semver/semver"
"github.com/coreos/pkg/capnslog"
)
type Capability string
const (
AuthCapability Capability = "auth"
V3rpcCapability Capability = "v3rpc"
)
var (
plog = capnslog.NewPackageLogger("github.com/coreos/etcd/etcdserver", "api")
// capabilityMaps is a static map of version to capability map.
// the base capabilities are the set of capabilities that 2.0 supports.
capabilityMaps = map[string]map[Capability]bool{
"2.1.0": {AuthCapability: true},
"2.2.0": {AuthCapability: true},
"2.3.0": {AuthCapability: true},
"3.0.0": {AuthCapability: true, V3rpcCapability: true},
}
enableMapMu sync.RWMutex
// enabledMap points to a map in capabilityMaps
enabledMap map[Capability]bool
curVersion *semver.Version
)
func init() {
enabledMap = make(map[Capability]bool)
}
// UpdateCapability updates the enabledMap when the cluster version increases.
func UpdateCapability(v *semver.Version) {
if v == nil {
// if recovered but version was never set by cluster
return
}
enableMapMu.Lock()
if curVersion != nil && !curVersion.LessThan(*v) {
enableMapMu.Unlock()
return
}
curVersion = v
enabledMap = capabilityMaps[curVersion.String()]
enableMapMu.Unlock()
plog.Infof("enabled capabilities for version %s", version.Cluster(v.String()))
}
func IsCapabilityEnabled(c Capability) bool {
enableMapMu.RLock()
defer enableMapMu.RUnlock()
if enabledMap == nil {
return false
}
return enabledMap[c]
}
func EnableCapability(c Capability) {
enableMapMu.Lock()
defer enableMapMu.Unlock()
enabledMap[c] = true
}

View File

@ -1,4 +1,4 @@
// Copyright 2016 CoreOS, Inc.
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.

16
vendor/github.com/coreos/etcd/etcdserver/api/doc.go generated vendored Normal file
View File

@ -0,0 +1,16 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package api manages the capabilities and features that are exposed to clients by the etcd cluster.
package api

View File

@ -1,4 +1,4 @@
// Copyright 2015 CoreOS, Inc.
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -17,75 +17,14 @@ package v2http
import (
"fmt"
"net/http"
"sync"
"time"
"github.com/coreos/etcd/etcdserver"
"github.com/coreos/etcd/etcdserver/api"
"github.com/coreos/etcd/etcdserver/api/v2http/httptypes"
"github.com/coreos/go-semver/semver"
)
type capability string
const (
authCapability capability = "auth"
)
var (
// capabilityMaps is a static map of version to capability map.
// the base capabilities is the set of capability 2.0 supports.
capabilityMaps = map[string]map[capability]bool{
"2.1.0": {authCapability: true},
"2.2.0": {authCapability: true},
"2.3.0": {authCapability: true},
}
enableMapMu sync.Mutex
// enabledMap points to a map in capabilityMaps
enabledMap map[capability]bool
)
// capabilityLoop checks the cluster version every 500ms and updates
// the enabledMap when the cluster version increases.
// capabilityLoop MUST be run in a goroutine before checking capability
// or using capabilityHandler.
func capabilityLoop(s *etcdserver.EtcdServer) {
stopped := s.StopNotify()
var pv *semver.Version
for {
if v := s.ClusterVersion(); v != pv {
if pv == nil {
pv = v
} else if v != nil && pv.LessThan(*v) {
pv = v
}
enableMapMu.Lock()
enabledMap = capabilityMaps[pv.String()]
enableMapMu.Unlock()
plog.Infof("enabled capabilities for version %s", pv)
}
select {
case <-stopped:
return
case <-time.After(500 * time.Millisecond):
}
}
}
func isCapabilityEnabled(c capability) bool {
enableMapMu.Lock()
defer enableMapMu.Unlock()
if enabledMap == nil {
return false
}
return enabledMap[c]
}
func capabilityHandler(c capability, fn func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
func capabilityHandler(c api.Capability, fn func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
if !isCapabilityEnabled(c) {
if !api.IsCapabilityEnabled(c) {
notCapable(w, r, c)
return
}
@ -93,7 +32,7 @@ func capabilityHandler(c capability, fn func(http.ResponseWriter, *http.Request)
}
}
func notCapable(w http.ResponseWriter, r *http.Request, c capability) {
func notCapable(w http.ResponseWriter, r *http.Request, c api.Capability) {
herr := httptypes.NewHTTPError(http.StatusInternalServerError, fmt.Sprintf("Not capable of accessing %s feature during rolling upgrades.", c))
if err := herr.WriteTo(w); err != nil {
plog.Debugf("error writing HTTPError (%v) to %s", err, r.RemoteAddr)

View File

@ -1,4 +1,4 @@
// Copyright 2015 CoreOS, Inc.
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -62,16 +62,15 @@ const (
// NewClientHandler generates a muxed http.Handler with the given parameters to serve etcd client requests.
func NewClientHandler(server *etcdserver.EtcdServer, timeout time.Duration) http.Handler {
go capabilityLoop(server)
sec := auth.NewStore(server, timeout)
kh := &keysHandler{
sec: sec,
server: server,
cluster: server.Cluster(),
timer: server,
timeout: timeout,
sec: sec,
server: server,
cluster: server.Cluster(),
timer: server,
timeout: timeout,
clientCertAuthEnabled: server.Cfg.ClientCertAuthEnabled,
}
sh := &statsHandler{
@ -84,6 +83,7 @@ func NewClientHandler(server *etcdserver.EtcdServer, timeout time.Duration) http
cluster: server.Cluster(),
timeout: timeout,
clock: clockwork.NewRealClock(),
clientCertAuthEnabled: server.Cfg.ClientCertAuthEnabled,
}
dmh := &deprecatedMachinesHandler{
@ -91,8 +91,9 @@ func NewClientHandler(server *etcdserver.EtcdServer, timeout time.Duration) http
}
sech := &authHandler{
sec: sec,
cluster: server.Cluster(),
sec: sec,
cluster: server.Cluster(),
clientCertAuthEnabled: server.Cfg.ClientCertAuthEnabled,
}
mux := http.NewServeMux()
@ -133,11 +134,12 @@ func NewClientHandler(server *etcdserver.EtcdServer, timeout time.Duration) http
}
type keysHandler struct {
sec auth.Store
server etcdserver.Server
cluster api.Cluster
timer etcdserver.RaftTimer
timeout time.Duration
sec auth.Store
server etcdserver.Server
cluster api.Cluster
timer etcdserver.RaftTimer
timeout time.Duration
clientCertAuthEnabled bool
}
func (h *keysHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
@ -157,7 +159,7 @@ func (h *keysHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
return
}
// The path must be valid at this point (we've parsed the request successfully).
if !hasKeyPrefixAccess(h.sec, r, r.URL.Path[len(keysPrefix):], rr.Recursive) {
if !hasKeyPrefixAccess(h.sec, r, r.URL.Path[len(keysPrefix):], rr.Recursive, h.clientCertAuthEnabled) {
writeKeyNoAuth(w)
return
}
@ -200,18 +202,19 @@ func (h *deprecatedMachinesHandler) ServeHTTP(w http.ResponseWriter, r *http.Req
}
type membersHandler struct {
sec auth.Store
server etcdserver.Server
cluster api.Cluster
timeout time.Duration
clock clockwork.Clock
sec auth.Store
server etcdserver.Server
cluster api.Cluster
timeout time.Duration
clock clockwork.Clock
clientCertAuthEnabled bool
}
func (h *membersHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
if !allowMethod(w, r.Method, "GET", "POST", "DELETE", "PUT") {
return
}
if !hasWriteRootAccess(h.sec, r) {
if !hasWriteRootAccess(h.sec, r, h.clientCertAuthEnabled) {
writeNoAuth(w, r)
return
}
@ -720,20 +723,19 @@ func trimEventPrefix(ev *store.Event, prefix string) *store.Event {
// Since the *Event may reference one in the store history,
// we must copy it before modifying
e := ev.Clone()
e.Node = trimNodeExternPrefix(e.Node, prefix)
e.PrevNode = trimNodeExternPrefix(e.PrevNode, prefix)
trimNodeExternPrefix(e.Node, prefix)
trimNodeExternPrefix(e.PrevNode, prefix)
return e
}
func trimNodeExternPrefix(n *store.NodeExtern, prefix string) *store.NodeExtern {
func trimNodeExternPrefix(n *store.NodeExtern, prefix string) {
if n == nil {
return nil
return
}
n.Key = strings.TrimPrefix(n.Key, prefix)
for _, nn := range n.Nodes {
nn = trimNodeExternPrefix(nn, prefix)
trimNodeExternPrefix(nn, prefix)
}
return n
}
func trimErrorPrefix(err error, prefix string) error {

View File

@ -1,4 +1,4 @@
// Copyright 2015 CoreOS, Inc.
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -26,18 +26,56 @@ import (
)
type authHandler struct {
sec auth.Store
cluster api.Cluster
sec auth.Store
cluster api.Cluster
clientCertAuthEnabled bool
}
func hasWriteRootAccess(sec auth.Store, r *http.Request) bool {
func hasWriteRootAccess(sec auth.Store, r *http.Request, clientCertAuthEnabled bool) bool {
if r.Method == "GET" || r.Method == "HEAD" {
return true
}
return hasRootAccess(sec, r)
return hasRootAccess(sec, r, clientCertAuthEnabled)
}
func hasRootAccess(sec auth.Store, r *http.Request) bool {
func userFromBasicAuth(sec auth.Store, r *http.Request) *auth.User {
username, password, ok := r.BasicAuth()
if !ok {
plog.Warningf("auth: malformed basic auth encoding")
return nil
}
user, err := sec.GetUser(username)
if err != nil {
return nil
}
ok = sec.CheckPassword(user, password)
if !ok {
plog.Warningf("auth: incorrect password for user: %s", username)
return nil
}
return &user
}
func userFromClientCertificate(sec auth.Store, r *http.Request) *auth.User {
if r.TLS == nil {
return nil
}
for _, chains := range r.TLS.VerifiedChains {
for _, chain := range chains {
plog.Debugf("auth: found common name %s.\n", chain.Subject.CommonName)
user, err := sec.GetUser(chain.Subject.CommonName)
if err == nil {
plog.Debugf("auth: authenticated user %s by cert common name.", user.User)
return &user
}
}
}
return nil
}
func hasRootAccess(sec auth.Store, r *http.Request, clientCertAuthEnabled bool) bool {
if sec == nil {
// No store means no auth available, eg, tests.
return true
@ -45,30 +83,30 @@ func hasRootAccess(sec auth.Store, r *http.Request) bool {
if !sec.AuthEnabled() {
return true
}
username, password, ok := r.BasicAuth()
if !ok {
return false
}
rootUser, err := sec.GetUser(username)
if err != nil {
return false
var rootUser *auth.User
if r.Header.Get("Authorization") == "" && clientCertAuthEnabled {
rootUser = userFromClientCertificate(sec, r)
if rootUser == nil {
return false
}
} else {
rootUser = userFromBasicAuth(sec, r)
if rootUser == nil {
return false
}
}
ok = sec.CheckPassword(rootUser, password)
if !ok {
plog.Warningf("auth: wrong password for user %s", username)
return false
}
for _, role := range rootUser.Roles {
if role == auth.RootRoleName {
return true
}
}
plog.Warningf("auth: user %s does not have the %s role for resource %s.", username, auth.RootRoleName, r.URL.Path)
plog.Warningf("auth: user %s does not have the %s role for resource %s.", rootUser.User, auth.RootRoleName, r.URL.Path)
return false
}
func hasKeyPrefixAccess(sec auth.Store, r *http.Request, key string, recursive bool) bool {
func hasKeyPrefixAccess(sec auth.Store, r *http.Request, key string, recursive, clientCertAuthEnabled bool) bool {
if sec == nil {
// No store means no auth available, eg, tests.
return true
@ -76,25 +114,21 @@ func hasKeyPrefixAccess(sec auth.Store, r *http.Request, key string, recursive b
if !sec.AuthEnabled() {
return true
}
if r.Header.Get("Authorization") == "" {
plog.Warningf("auth: no authorization provided, checking guest access")
return hasGuestAccess(sec, r, key)
}
username, password, ok := r.BasicAuth()
if !ok {
plog.Warningf("auth: malformed basic auth encoding")
return false
}
user, err := sec.GetUser(username)
if err != nil {
plog.Warningf("auth: no such user: %s.", username)
return false
}
authAsUser := sec.CheckPassword(user, password)
if !authAsUser {
plog.Warningf("auth: incorrect password for user: %s.", username)
return false
var user *auth.User
if r.Header.Get("Authorization") == "" && clientCertAuthEnabled {
user = userFromClientCertificate(sec, r)
if user == nil {
plog.Warningf("auth: no authorization provided, checking guest access")
return hasGuestAccess(sec, r, key)
}
} else {
user = userFromBasicAuth(sec, r)
if user == nil {
return false
}
}
writeAccess := r.Method != "GET" && r.Method != "HEAD"
for _, roleName := range user.Roles {
role, err := sec.GetRole(roleName)
@ -109,7 +143,7 @@ func hasKeyPrefixAccess(sec auth.Store, r *http.Request, key string, recursive b
return true
}
}
plog.Warningf("auth: invalid access for user %s on key %s.", username, key)
plog.Warningf("auth: invalid access for user %s on key %s.", user.User, key)
return false
}
@ -134,18 +168,18 @@ func writeNoAuth(w http.ResponseWriter, r *http.Request) {
}
func handleAuth(mux *http.ServeMux, sh *authHandler) {
mux.HandleFunc(authPrefix+"/roles", capabilityHandler(authCapability, sh.baseRoles))
mux.HandleFunc(authPrefix+"/roles/", capabilityHandler(authCapability, sh.handleRoles))
mux.HandleFunc(authPrefix+"/users", capabilityHandler(authCapability, sh.baseUsers))
mux.HandleFunc(authPrefix+"/users/", capabilityHandler(authCapability, sh.handleUsers))
mux.HandleFunc(authPrefix+"/enable", capabilityHandler(authCapability, sh.enableDisable))
mux.HandleFunc(authPrefix+"/roles", capabilityHandler(api.AuthCapability, sh.baseRoles))
mux.HandleFunc(authPrefix+"/roles/", capabilityHandler(api.AuthCapability, sh.handleRoles))
mux.HandleFunc(authPrefix+"/users", capabilityHandler(api.AuthCapability, sh.baseUsers))
mux.HandleFunc(authPrefix+"/users/", capabilityHandler(api.AuthCapability, sh.handleUsers))
mux.HandleFunc(authPrefix+"/enable", capabilityHandler(api.AuthCapability, sh.enableDisable))
}
func (sh *authHandler) baseRoles(w http.ResponseWriter, r *http.Request) {
if !allowMethod(w, r.Method, "GET") {
return
}
if !hasRootAccess(sh.sec, r) {
if !hasRootAccess(sh.sec, r, sh.clientCertAuthEnabled) {
writeNoAuth(w, r)
return
}
@ -209,7 +243,7 @@ func (sh *authHandler) forRole(w http.ResponseWriter, r *http.Request, role stri
if !allowMethod(w, r.Method, "GET", "PUT", "DELETE") {
return
}
if !hasRootAccess(sh.sec, r) {
if !hasRootAccess(sh.sec, r, sh.clientCertAuthEnabled) {
writeNoAuth(w, r)
return
}
@ -285,11 +319,15 @@ type userWithRoles struct {
Roles []auth.Role `json:"roles,omitempty"`
}
type usersCollections struct {
Users []userWithRoles `json:"users"`
}
func (sh *authHandler) baseUsers(w http.ResponseWriter, r *http.Request) {
if !allowMethod(w, r.Method, "GET") {
return
}
if !hasRootAccess(sh.sec, r) {
if !hasRootAccess(sh.sec, r, sh.clientCertAuthEnabled) {
writeNoAuth(w, r)
return
}
@ -311,9 +349,7 @@ func (sh *authHandler) baseUsers(w http.ResponseWriter, r *http.Request) {
return
}
var usersCollections struct {
Users []userWithRoles `json:"users"`
}
ucs := usersCollections{}
for _, userName := range users {
var user auth.User
user, err = sh.sec.GetUser(userName)
@ -327,15 +363,14 @@ func (sh *authHandler) baseUsers(w http.ResponseWriter, r *http.Request) {
var role auth.Role
role, err = sh.sec.GetRole(roleName)
if err != nil {
writeError(w, r, err)
return
continue
}
uwr.Roles = append(uwr.Roles, role)
}
usersCollections.Users = append(usersCollections.Users, uwr)
ucs.Users = append(ucs.Users, uwr)
}
err = json.NewEncoder(w).Encode(usersCollections)
err = json.NewEncoder(w).Encode(ucs)
if err != nil {
plog.Warningf("baseUsers error encoding on %s", r.URL)
@ -364,7 +399,7 @@ func (sh *authHandler) forUser(w http.ResponseWriter, r *http.Request, user stri
if !allowMethod(w, r.Method, "GET", "PUT", "DELETE") {
return
}
if !hasRootAccess(sh.sec, r) {
if !hasRootAccess(sh.sec, r, sh.clientCertAuthEnabled) {
writeNoAuth(w, r)
return
}
@ -477,7 +512,7 @@ func (sh *authHandler) enableDisable(w http.ResponseWriter, r *http.Request) {
if !allowMethod(w, r.Method, "GET", "PUT", "DELETE") {
return
}
if !hasWriteRootAccess(sh.sec, r) {
if !hasWriteRootAccess(sh.sec, r, sh.clientCertAuthEnabled) {
writeNoAuth(w, r)
return
}

View File

@ -1,4 +1,4 @@
// Copyright 2015 CoreOS, Inc.
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.

View File

@ -1,4 +1,4 @@
// Copyright 2015 CoreOS, Inc.
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.

View File

@ -1,4 +1,4 @@
// Copyright 2015 CoreOS, Inc.
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.

View File

@ -1,4 +1,4 @@
// Copyright 2015 CoreOS, Inc.
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.

View File

@ -1,4 +1,4 @@
// Copyright 2015 CoreOS, Inc.
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -48,7 +48,7 @@ var (
prometheus.HistogramOpts{
Namespace: "etcd",
Subsystem: "http",
Name: "successful_duration_second",
Name: "successful_duration_seconds",
Help: "Bucketed histogram of processing time (s) of successfully handled requests (non-watches), by method (GET/PUT etc.).",
Buckets: prometheus.ExponentialBuckets(0.0005, 2, 13),
}, []string{"method"})

View File

@ -1,4 +1,4 @@
// Copyright 2015 CoreOS, Inc.
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.

View File

@ -1,4 +1,4 @@
// Copyright 2016 Nippon Telegraph and Telephone Corporation.
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -37,13 +37,19 @@ func (as *AuthServer) AuthEnable(ctx context.Context, r *pb.AuthEnableRequest) (
}
func (as *AuthServer) AuthDisable(ctx context.Context, r *pb.AuthDisableRequest) (*pb.AuthDisableResponse, error) {
plog.Info("not implemented yet")
return nil, nil
resp, err := as.authenticator.AuthDisable(ctx, r)
if err != nil {
return nil, togRPCError(err)
}
return resp, nil
}
func (as *AuthServer) Authenticate(ctx context.Context, r *pb.AuthenticateRequest) (*pb.AuthenticateResponse, error) {
plog.Info("not implemented yet")
return nil, nil
resp, err := as.authenticator.Authenticate(ctx, r)
if err != nil {
return nil, togRPCError(err)
}
return resp, nil
}
func (as *AuthServer) RoleAdd(ctx context.Context, r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) {
@ -55,23 +61,43 @@ func (as *AuthServer) RoleAdd(ctx context.Context, r *pb.AuthRoleAddRequest) (*p
}
func (as *AuthServer) RoleDelete(ctx context.Context, r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) {
plog.Info("not implemented yet")
return nil, nil
resp, err := as.authenticator.RoleDelete(ctx, r)
if err != nil {
return nil, togRPCError(err)
}
return resp, nil
}
func (as *AuthServer) RoleGet(ctx context.Context, r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) {
plog.Info("not implemented yet")
return nil, nil
resp, err := as.authenticator.RoleGet(ctx, r)
if err != nil {
return nil, togRPCError(err)
}
return resp, nil
}
func (as *AuthServer) RoleRevoke(ctx context.Context, r *pb.AuthRoleRevokeRequest) (*pb.AuthRoleRevokeResponse, error) {
plog.Info("not implemented yet")
return nil, nil
func (as *AuthServer) RoleList(ctx context.Context, r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) {
resp, err := as.authenticator.RoleList(ctx, r)
if err != nil {
return nil, togRPCError(err)
}
return resp, nil
}
func (as *AuthServer) RoleGrant(ctx context.Context, r *pb.AuthRoleGrantRequest) (*pb.AuthRoleGrantResponse, error) {
plog.Info("not implemented yet")
return nil, nil
func (as *AuthServer) RoleRevokePermission(ctx context.Context, r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) {
resp, err := as.authenticator.RoleRevokePermission(ctx, r)
if err != nil {
return nil, togRPCError(err)
}
return resp, nil
}
func (as *AuthServer) RoleGrantPermission(ctx context.Context, r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) {
resp, err := as.authenticator.RoleGrantPermission(ctx, r)
if err != nil {
return nil, togRPCError(err)
}
return resp, nil
}
func (as *AuthServer) UserAdd(ctx context.Context, r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) {
@ -91,18 +117,35 @@ func (as *AuthServer) UserDelete(ctx context.Context, r *pb.AuthUserDeleteReques
}
func (as *AuthServer) UserGet(ctx context.Context, r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) {
plog.Info("not implemented yet")
return nil, nil
resp, err := as.authenticator.UserGet(ctx, r)
if err != nil {
return nil, togRPCError(err)
}
return resp, nil
}
func (as *AuthServer) UserGrant(ctx context.Context, r *pb.AuthUserGrantRequest) (*pb.AuthUserGrantResponse, error) {
plog.Info("not implemented yet")
return nil, nil
func (as *AuthServer) UserList(ctx context.Context, r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) {
resp, err := as.authenticator.UserList(ctx, r)
if err != nil {
return nil, togRPCError(err)
}
return resp, nil
}
func (as *AuthServer) UserRevoke(ctx context.Context, r *pb.AuthUserRevokeRequest) (*pb.AuthUserRevokeResponse, error) {
plog.Info("not implemented yet")
return nil, nil
func (as *AuthServer) UserGrantRole(ctx context.Context, r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) {
resp, err := as.authenticator.UserGrantRole(ctx, r)
if err != nil {
return nil, togRPCError(err)
}
return resp, nil
}
func (as *AuthServer) UserRevokeRole(ctx context.Context, r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) {
resp, err := as.authenticator.UserRevokeRole(ctx, r)
if err != nil {
return nil, togRPCError(err)
}
return resp, nil
}
func (as *AuthServer) UserChangePassword(ctx context.Context, r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) {

View File

@ -0,0 +1,34 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v3rpc
import "github.com/gogo/protobuf/proto"
type codec struct{}
func (c *codec) Marshal(v interface{}) ([]byte, error) {
b, err := proto.Marshal(v.(proto.Message))
sentBytes.Add(float64(len(b)))
return b, err
}
func (c *codec) Unmarshal(data []byte, v interface{}) error {
receivedBytes.Add(float64(len(data)))
return proto.Unmarshal(data, v.(proto.Message))
}
func (c *codec) String() string {
return "proto"
}
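The codec above only wraps the standard proto marshaling so that the sentBytes/receivedBytes counters are updated on every gRPC message; it is installed with grpc.CustomCodec when the server is built, as the Server constructor in the next file shows. A minimal sketch, assuming only the grpc package imported there:
// sketch: wiring the byte-counting codec into a gRPC server
func newServerWithCodec() *grpc.Server {
	return grpc.NewServer(grpc.CustomCodec(&codec{}))
}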

View File

@ -1,4 +1,4 @@
// Copyright 2016 CoreOS, Inc.
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -19,15 +19,24 @@ import (
"github.com/coreos/etcd/etcdserver"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"github.com/coreos/pkg/capnslog"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/grpclog"
)
func init() {
grpclog.SetLogger(capnslog.NewPackageLogger("github.com/coreos/etcd/etcdserver", "v3rpc/grpc"))
}
func Server(s *etcdserver.EtcdServer, tls *tls.Config) *grpc.Server {
var opts []grpc.ServerOption
opts = append(opts, grpc.CustomCodec(&codec{}))
if tls != nil {
opts = append(opts, grpc.Creds(credentials.NewTLS(tls)))
}
opts = append(opts, grpc.UnaryInterceptor(newUnaryInterceptor(s)))
opts = append(opts, grpc.StreamInterceptor(newStreamInterceptor(s)))
grpcServer := grpc.NewServer(opts...)
pb.RegisterKVServer(grpcServer, NewQuotaKVServer(s))
@ -36,5 +45,6 @@ func Server(s *etcdserver.EtcdServer, tls *tls.Config) *grpc.Server {
pb.RegisterClusterServer(grpcServer, NewClusterServer(s))
pb.RegisterAuthServer(grpcServer, NewAuthServer(s))
pb.RegisterMaintenanceServer(grpcServer, NewMaintenanceServer(s))
return grpcServer
}

View File

@ -1,4 +1,4 @@
// Copyright 2016 CoreOS, Inc.
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -40,4 +40,7 @@ func (h *header) fill(rh *pb.ResponseHeader) {
rh.ClusterId = uint64(h.clusterID)
rh.MemberId = uint64(h.memberID)
rh.RaftTerm = h.raftTimer.Term()
if rh.Revision == 0 {
rh.Revision = h.rev()
}
}

View File

@ -0,0 +1,176 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v3rpc
import (
"strings"
"sync"
"time"
"github.com/coreos/etcd/etcdserver"
"github.com/coreos/etcd/etcdserver/api"
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
"github.com/coreos/etcd/pkg/types"
"github.com/coreos/etcd/raft"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/metadata"
)
const (
maxNoLeaderCnt = 3
)
type streamsMap struct {
mu sync.Mutex
streams map[grpc.ServerStream]struct{}
}
func newUnaryInterceptor(s *etcdserver.EtcdServer) grpc.UnaryServerInterceptor {
return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) {
if !api.IsCapabilityEnabled(api.V3rpcCapability) {
return nil, rpctypes.ErrGRPCNotCapable
}
md, ok := metadata.FromContext(ctx)
if ok {
if ks := md[rpctypes.MetadataRequireLeaderKey]; len(ks) > 0 && ks[0] == rpctypes.MetadataHasLeader {
if s.Leader() == types.ID(raft.None) {
return nil, rpctypes.ErrGRPCNoLeader
}
}
}
return metricsUnaryInterceptor(ctx, req, info, handler)
}
}
func newStreamInterceptor(s *etcdserver.EtcdServer) grpc.StreamServerInterceptor {
smap := monitorLeader(s)
return func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
if !api.IsCapabilityEnabled(api.V3rpcCapability) {
return rpctypes.ErrGRPCNotCapable
}
md, ok := metadata.FromContext(ss.Context())
if ok {
if ks := md[rpctypes.MetadataRequireLeaderKey]; len(ks) > 0 && ks[0] == rpctypes.MetadataHasLeader {
if s.Leader() == types.ID(raft.None) {
return rpctypes.ErrGRPCNoLeader
}
cctx, cancel := context.WithCancel(ss.Context())
ss = serverStreamWithCtx{ctx: cctx, cancel: &cancel, ServerStream: ss}
smap.mu.Lock()
smap.streams[ss] = struct{}{}
smap.mu.Unlock()
defer func() {
smap.mu.Lock()
delete(smap.streams, ss)
smap.mu.Unlock()
cancel()
}()
}
}
return metricsStreamInterceptor(srv, ss, info, handler)
}
}
func metricsUnaryInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) {
service, method := splitMethodName(info.FullMethod)
receivedCounter.WithLabelValues(service, method).Inc()
start := time.Now()
resp, err = handler(ctx, req)
if err != nil {
failedCounter.WithLabelValues(service, method, grpc.Code(err).String()).Inc()
}
handlingDuration.WithLabelValues(service, method).Observe(time.Since(start).Seconds())
return resp, err
}
func metricsStreamInterceptor(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
service, method := splitMethodName(info.FullMethod)
receivedCounter.WithLabelValues(service, method).Inc()
err := handler(srv, ss)
if err != nil {
failedCounter.WithLabelValues(service, method, grpc.Code(err).String()).Inc()
}
return err
}
func splitMethodName(fullMethodName string) (string, string) {
fullMethodName = strings.TrimPrefix(fullMethodName, "/") // remove leading slash
if i := strings.Index(fullMethodName, "/"); i >= 0 {
return fullMethodName[:i], fullMethodName[i+1:]
}
return "unknown", "unknown"
}
type serverStreamWithCtx struct {
grpc.ServerStream
ctx context.Context
cancel *context.CancelFunc
}
func (ssc serverStreamWithCtx) Context() context.Context { return ssc.ctx }
func monitorLeader(s *etcdserver.EtcdServer) *streamsMap {
smap := &streamsMap{
streams: make(map[grpc.ServerStream]struct{}),
}
go func() {
election := time.Duration(s.Cfg.TickMs) * time.Duration(s.Cfg.ElectionTicks) * time.Millisecond
noLeaderCnt := 0
for {
select {
case <-s.StopNotify():
return
case <-time.After(election):
if s.Leader() == types.ID(raft.None) {
noLeaderCnt++
} else {
noLeaderCnt = 0
}
// We are more conservative about canceling existing streams. Reconnecting streams
// costs much more than just rejecting new requests. So we wait until the member
// cannot find a leader for maxNoLeaderCnt election timeouts before canceling existing streams.
if noLeaderCnt >= maxNoLeaderCnt {
smap.mu.Lock()
for ss := range smap.streams {
if ssWithCtx, ok := ss.(serverStreamWithCtx); ok {
(*ssWithCtx.cancel)()
<-ss.Context().Done()
}
}
smap.streams = make(map[grpc.ServerStream]struct{})
smap.mu.Unlock()
}
}
}
}()
return smap
}
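The interceptors above only fail fast with ErrGRPCNoLeader when the client explicitly asks for it via metadata. A minimal client-side sketch, assuming the same grpc/metadata and etcdserverpb packages used above, plus the metadata.NewContext helper from the same grpc version; the hasleader key and value are the MetadataRequireLeaderKey/MetadataHasLeader constants defined later in rpctypes:
// sketch: a client requiring a leader for a Range call; if this member has lost
// its leader, the unary interceptor above returns ErrGRPCNoLeader instead of serving it
func rangeRequiringLeader(ctx context.Context, kv pb.KVClient, key []byte) (*pb.RangeResponse, error) {
	md := metadata.Pairs(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader)
	return kv.Range(metadata.NewContext(ctx, md), &pb.RangeRequest{Key: key})
}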

View File

@ -1,4 +1,4 @@
// Copyright 2015 CoreOS, Inc.
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -125,38 +125,38 @@ func (s *kvServer) Compact(ctx context.Context, r *pb.CompactionRequest) (*pb.Co
func checkRangeRequest(r *pb.RangeRequest) error {
if len(r.Key) == 0 {
return rpctypes.ErrEmptyKey
return rpctypes.ErrGRPCEmptyKey
}
return nil
}
func checkPutRequest(r *pb.PutRequest) error {
if len(r.Key) == 0 {
return rpctypes.ErrEmptyKey
return rpctypes.ErrGRPCEmptyKey
}
return nil
}
func checkDeleteRequest(r *pb.DeleteRangeRequest) error {
if len(r.Key) == 0 {
return rpctypes.ErrEmptyKey
return rpctypes.ErrGRPCEmptyKey
}
return nil
}
func checkTxnRequest(r *pb.TxnRequest) error {
if len(r.Compare) > MaxOpsPerTxn || len(r.Success) > MaxOpsPerTxn || len(r.Failure) > MaxOpsPerTxn {
return rpctypes.ErrTooManyOps
return rpctypes.ErrGRPCTooManyOps
}
for _, c := range r.Compare {
if len(c.Key) == 0 {
return rpctypes.ErrEmptyKey
return rpctypes.ErrGRPCEmptyKey
}
}
for _, u := range r.Success {
if err := checkRequestUnion(u); err != nil {
if err := checkRequestOp(u); err != nil {
return err
}
}
@ -165,23 +165,19 @@ func checkTxnRequest(r *pb.TxnRequest) error {
}
for _, u := range r.Failure {
if err := checkRequestUnion(u); err != nil {
if err := checkRequestOp(u); err != nil {
return err
}
}
if err := checkRequestDupKeys(r.Failure); err != nil {
return err
}
return nil
return checkRequestDupKeys(r.Failure)
}
// checkRequestDupKeys gives rpctypes.ErrDuplicateKey if the same key is modified twice
func checkRequestDupKeys(reqs []*pb.RequestUnion) error {
// checkRequestDupKeys gives rpctypes.ErrGRPCDuplicateKey if the same key is modified twice
func checkRequestDupKeys(reqs []*pb.RequestOp) error {
// check put overlap
keys := make(map[string]struct{})
for _, requ := range reqs {
tv, ok := requ.Request.(*pb.RequestUnion_RequestPut)
tv, ok := requ.Request.(*pb.RequestOp_RequestPut)
if !ok {
continue
}
@ -189,11 +185,10 @@ func checkRequestDupKeys(reqs []*pb.RequestUnion) error {
if preq == nil {
continue
}
key := string(preq.Key)
if _, ok := keys[key]; ok {
return rpctypes.ErrDuplicateKey
if _, ok := keys[string(preq.Key)]; ok {
return rpctypes.ErrGRPCDuplicateKey
}
keys[key] = struct{}{}
keys[string(preq.Key)] = struct{}{}
}
// no need to check deletes if no puts; delete overlaps are permitted
@ -210,7 +205,7 @@ func checkRequestDupKeys(reqs []*pb.RequestUnion) error {
// check put overlap with deletes
for _, requ := range reqs {
tv, ok := requ.Request.(*pb.RequestUnion_RequestDeleteRange)
tv, ok := requ.Request.(*pb.RequestOp_RequestDeleteRange)
if !ok {
continue
}
@ -218,17 +213,16 @@ func checkRequestDupKeys(reqs []*pb.RequestUnion) error {
if dreq == nil {
continue
}
key := string(dreq.Key)
if dreq.RangeEnd == nil {
if _, found := keys[key]; found {
return rpctypes.ErrDuplicateKey
if _, found := keys[string(dreq.Key)]; found {
return rpctypes.ErrGRPCDuplicateKey
}
} else {
lo := sort.SearchStrings(sortedKeys, key)
lo := sort.SearchStrings(sortedKeys, string(dreq.Key))
hi := sort.SearchStrings(sortedKeys, string(dreq.RangeEnd))
if lo != hi {
// element between lo and hi => overlap
return rpctypes.ErrDuplicateKey
return rpctypes.ErrGRPCDuplicateKey
}
}
}
@ -236,23 +230,23 @@ func checkRequestDupKeys(reqs []*pb.RequestUnion) error {
return nil
}
func checkRequestUnion(u *pb.RequestUnion) error {
func checkRequestOp(u *pb.RequestOp) error {
// TODO: ensure only one of the fields is set.
switch uv := u.Request.(type) {
case *pb.RequestUnion_RequestRange:
case *pb.RequestOp_RequestRange:
if uv.RequestRange != nil {
return checkRangeRequest(uv.RequestRange)
}
case *pb.RequestUnion_RequestPut:
case *pb.RequestOp_RequestPut:
if uv.RequestPut != nil {
return checkPutRequest(uv.RequestPut)
}
case *pb.RequestUnion_RequestDeleteRange:
case *pb.RequestOp_RequestDeleteRange:
if uv.RequestDeleteRange != nil {
return checkDeleteRequest(uv.RequestDeleteRange)
}
default:
// empty union
// empty op
return nil
}
return nil
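The duplicate-key check above collects the keys touched by puts, sorts them, and then runs two binary searches per delete range; if the two positions differ, some put key falls inside the delete range. A tiny standalone sketch of just that overlap test, with hypothetical keys:
// sketch: the range-overlap test used by checkRequestDupKeys, in isolation
package main

import (
	"fmt"
	"sort"
)

func main() {
	putKeys := []string{"a", "c", "m"} // keys written by puts, already sorted
	delStart, delEnd := "b", "n"       // a delete-range over [delStart, delEnd)

	lo := sort.SearchStrings(putKeys, delStart)
	hi := sort.SearchStrings(putKeys, delEnd)
	// putKeys[lo:hi] are exactly the put keys inside the delete range,
	// so lo != hi means the txn both puts and deletes the same key.
	fmt.Println(lo != hi, putKeys[lo:hi]) // true [c m]
}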

View File

@ -1,4 +1,4 @@
// Copyright 2016 CoreOS, Inc.
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -25,27 +25,33 @@ import (
)
type LeaseServer struct {
le etcdserver.Lessor
hdr header
le etcdserver.Lessor
}
func NewLeaseServer(le etcdserver.Lessor) pb.LeaseServer {
return &LeaseServer{le: le}
func NewLeaseServer(s *etcdserver.EtcdServer) pb.LeaseServer {
return &LeaseServer{le: s, hdr: newHeader(s)}
}
func (ls *LeaseServer) LeaseGrant(ctx context.Context, cr *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) {
resp, err := ls.le.LeaseGrant(ctx, cr)
if err == lease.ErrLeaseExists {
return nil, rpctypes.ErrLeaseExist
return nil, rpctypes.ErrGRPCLeaseExist
}
if err != nil {
return nil, err
}
ls.hdr.fill(resp.Header)
return resp, err
}
func (ls *LeaseServer) LeaseRevoke(ctx context.Context, rr *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) {
r, err := ls.le.LeaseRevoke(ctx, rr)
resp, err := ls.le.LeaseRevoke(ctx, rr)
if err != nil {
return nil, rpctypes.ErrLeaseNotFound
return nil, rpctypes.ErrGRPCLeaseNotFound
}
return r, nil
ls.hdr.fill(resp.Header)
return resp, nil
}
func (ls *LeaseServer) LeaseKeepAlive(stream pb.Lease_LeaseKeepAliveServer) error {
@ -58,16 +64,26 @@ func (ls *LeaseServer) LeaseKeepAlive(stream pb.Lease_LeaseKeepAliveServer) erro
return err
}
// Create header before we send out the renew request.
// This makes sure the revision is smaller than or equal to the revision
// at which the keepalive happened at the local server (when the local
// server is the leader) or at the remote leader.
// Without this, a lease might be revoked at rev 3 but the client could see
// the keepalive succeed at rev 4.
resp := &pb.LeaseKeepAliveResponse{ID: req.ID, Header: &pb.ResponseHeader{}}
ls.hdr.fill(resp.Header)
ttl, err := ls.le.LeaseRenew(lease.LeaseID(req.ID))
if err == lease.ErrLeaseNotFound {
return rpctypes.ErrLeaseNotFound
err = nil
ttl = 0
}
if err != nil && err != lease.ErrLeaseNotFound {
if err != nil {
return err
}
resp := &pb.LeaseKeepAliveResponse{ID: req.ID, TTL: ttl}
resp.TTL = ttl
err = stream.Send(resp)
if err != nil {
return err

View File

@ -1,4 +1,4 @@
// Copyright 2016 CoreOS, Inc.
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -15,13 +15,22 @@
package v3rpc
import (
"crypto/sha256"
"io"
"github.com/coreos/etcd/etcdserver"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"github.com/coreos/etcd/storage/backend"
"github.com/coreos/etcd/mvcc"
"github.com/coreos/etcd/mvcc/backend"
"github.com/coreos/etcd/pkg/types"
"github.com/coreos/etcd/version"
"golang.org/x/net/context"
)
type KVGetter interface {
KV() mvcc.ConsistentWatchableKV
}
type BackendGetter interface {
Backend() backend.Backend
}
@ -30,33 +39,86 @@ type Alarmer interface {
Alarm(ctx context.Context, ar *pb.AlarmRequest) (*pb.AlarmResponse, error)
}
type RaftStatusGetter interface {
Index() uint64
Term() uint64
Leader() types.ID
}
type maintenanceServer struct {
rg RaftStatusGetter
kg KVGetter
bg BackendGetter
a Alarmer
hdr header
}
func NewMaintenanceServer(s *etcdserver.EtcdServer) pb.MaintenanceServer {
return &maintenanceServer{bg: s, a: s, hdr: newHeader(s)}
return &maintenanceServer{rg: s, kg: s, bg: s, a: s, hdr: newHeader(s)}
}
func (ms *maintenanceServer) Defragment(ctx context.Context, sr *pb.DefragmentRequest) (*pb.DefragmentResponse, error) {
plog.Noticef("starting to defragment the storage backend...")
err := ms.bg.Backend().Defrag()
if err != nil {
plog.Errorf("failed to deframent the storage backend (%v)", err)
plog.Errorf("failed to defragment the storage backend (%v)", err)
return nil, err
}
plog.Noticef("finished defragmenting the storage backend")
return &pb.DefragmentResponse{}, nil
}
func (ms *maintenanceServer) Snapshot(sr *pb.SnapshotRequest, srv pb.Maintenance_SnapshotServer) error {
snap := ms.bg.Backend().Snapshot()
pr, pw := io.Pipe()
defer pr.Close()
go func() {
snap.WriteTo(pw)
if err := snap.Close(); err != nil {
plog.Errorf("error closing snapshot (%v)", err)
}
pw.Close()
}()
// send file data
h := sha256.New()
br := int64(0)
buf := make([]byte, 32*1024)
sz := snap.Size()
for br < sz {
n, err := io.ReadFull(pr, buf)
if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
return togRPCError(err)
}
br += int64(n)
resp := &pb.SnapshotResponse{
RemainingBytes: uint64(sz - br),
Blob: buf[:n],
}
if err = srv.Send(resp); err != nil {
return togRPCError(err)
}
h.Write(buf[:n])
}
// send sha
sha := h.Sum(nil)
hresp := &pb.SnapshotResponse{RemainingBytes: 0, Blob: sha}
if err := srv.Send(hresp); err != nil {
return togRPCError(err)
}
return nil
}
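On the wire the snapshot is therefore a sequence of data chunks followed by one final message whose Blob is the SHA-256 of all preceding data. A minimal client-side sketch of consuming and verifying such a stream (assumed imports: bytes, crypto/sha256, fmt, io, and the pb package); the stream argument is narrowed to a small interface purely for illustration, which the generated pb.Maintenance_SnapshotClient is assumed to satisfy:
// sketch: read all chunks, then treat the trailing sha256.Size bytes as the checksum
func receiveSnapshot(stream interface {
	Recv() (*pb.SnapshotResponse, error)
}) ([]byte, error) {
	var all []byte
	for {
		resp, err := stream.Recv()
		if err == io.EOF {
			break
		}
		if err != nil {
			return nil, err
		}
		all = append(all, resp.Blob...)
	}
	if len(all) < sha256.Size {
		return nil, fmt.Errorf("snapshot stream too short")
	}
	data, sum := all[:len(all)-sha256.Size], all[len(all)-sha256.Size:]
	if got := sha256.Sum256(data); !bytes.Equal(got[:], sum) {
		return nil, fmt.Errorf("snapshot checksum mismatch")
	}
	return data, nil
}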
func (ms *maintenanceServer) Hash(ctx context.Context, r *pb.HashRequest) (*pb.HashResponse, error) {
h, err := ms.bg.Backend().Hash()
h, rev, err := ms.kg.KV().Hash()
if err != nil {
return nil, togRPCError(err)
}
resp := &pb.HashResponse{Header: &pb.ResponseHeader{Revision: ms.hdr.rev()}, Hash: h}
resp := &pb.HashResponse{Header: &pb.ResponseHeader{Revision: rev}, Hash: h}
ms.hdr.fill(resp.Header)
return resp, nil
}
@ -66,7 +128,14 @@ func (ms *maintenanceServer) Alarm(ctx context.Context, ar *pb.AlarmRequest) (*p
}
func (ms *maintenanceServer) Status(ctx context.Context, ar *pb.StatusRequest) (*pb.StatusResponse, error) {
resp := &pb.StatusResponse{Header: &pb.ResponseHeader{Revision: ms.hdr.rev()}, Version: version.Version}
resp := &pb.StatusResponse{
Header: &pb.ResponseHeader{Revision: ms.hdr.rev()},
Version: version.Version,
DbSize: ms.bg.Backend().Size(),
Leader: uint64(ms.rg.Leader()),
RaftIndex: ms.rg.Index(),
RaftTerm: ms.rg.Term(),
}
ms.hdr.fill(resp.Header)
return resp, nil
}

View File

@ -1,4 +1,4 @@
// Copyright 2016 CoreOS, Inc.
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -45,7 +45,7 @@ func NewClusterServer(s *etcdserver.EtcdServer) *ClusterServer {
func (cs *ClusterServer) MemberAdd(ctx context.Context, r *pb.MemberAddRequest) (*pb.MemberAddResponse, error) {
urls, err := types.NewURLs(r.PeerURLs)
if err != nil {
return nil, rpctypes.ErrMemberBadURLs
return nil, rpctypes.ErrGRPCMemberBadURLs
}
now := time.Now()
@ -53,16 +53,16 @@ func (cs *ClusterServer) MemberAdd(ctx context.Context, r *pb.MemberAddRequest)
err = cs.server.AddMember(ctx, *m)
switch {
case err == membership.ErrIDExists:
return nil, rpctypes.ErrMemberExist
return nil, rpctypes.ErrGRPCMemberExist
case err == membership.ErrPeerURLexists:
return nil, rpctypes.ErrPeerURLExist
return nil, rpctypes.ErrGRPCPeerURLExist
case err != nil:
return nil, grpc.Errorf(codes.Internal, err.Error())
}
return &pb.MemberAddResponse{
Header: cs.header(),
Member: &pb.Member{ID: uint64(m.ID), IsLeader: m.ID == cs.server.Leader(), PeerURLs: m.PeerURLs},
Member: &pb.Member{ID: uint64(m.ID), PeerURLs: m.PeerURLs},
}, nil
}
@ -72,7 +72,7 @@ func (cs *ClusterServer) MemberRemove(ctx context.Context, r *pb.MemberRemoveReq
case err == membership.ErrIDRemoved:
fallthrough
case err == membership.ErrIDNotFound:
return nil, rpctypes.ErrMemberNotFound
return nil, rpctypes.ErrGRPCMemberNotFound
case err != nil:
return nil, grpc.Errorf(codes.Internal, err.Error())
}
@ -88,9 +88,9 @@ func (cs *ClusterServer) MemberUpdate(ctx context.Context, r *pb.MemberUpdateReq
err := cs.server.UpdateMember(ctx, m)
switch {
case err == membership.ErrPeerURLexists:
return nil, rpctypes.ErrPeerURLExist
return nil, rpctypes.ErrGRPCPeerURLExist
case err == membership.ErrIDNotFound:
return nil, rpctypes.ErrMemberNotFound
return nil, rpctypes.ErrGRPCMemberNotFound
case err != nil:
return nil, grpc.Errorf(codes.Internal, err.Error())
}
@ -106,7 +106,6 @@ func (cs *ClusterServer) MemberList(ctx context.Context, r *pb.MemberListRequest
protoMembs[i] = &pb.Member{
Name: membs[i].Name,
ID: uint64(membs[i].ID),
IsLeader: membs[i].ID == cs.server.Leader(),
PeerURLs: membs[i].PeerURLs,
ClientURLs: membs[i].ClientURLs,
}

View File

@ -0,0 +1,67 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v3rpc
import "github.com/prometheus/client_golang/prometheus"
var (
receivedCounter = prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: "etcd",
Subsystem: "grpc",
Name: "requests_total",
Help: "Counter of received requests.",
}, []string{"grpc_service", "grpc_method"})
failedCounter = prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: "etcd",
Subsystem: "grpc",
Name: "requests_failed_total",
Help: "Counter of failed requests.",
}, []string{"grpc_service", "grpc_method", "grpc_code"})
handlingDuration = prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Namespace: "etcd",
Subsystem: "grpc",
Name: "unary_requests_duration_seconds",
Help: "Bucketed histogram of processing time (s) of handled unary (non-stream) requests.",
Buckets: prometheus.ExponentialBuckets(0.0005, 2, 13),
}, []string{"grpc_service", "grpc_method"})
sentBytes = prometheus.NewCounter(prometheus.CounterOpts{
Namespace: "etcd",
Subsystem: "network",
Name: "client_grpc_sent_bytes_total",
Help: "The total number of bytes sent to grpc clients.",
})
receivedBytes = prometheus.NewCounter(prometheus.CounterOpts{
Namespace: "etcd",
Subsystem: "network",
Name: "client_grpc_received_bytes_total",
Help: "The total number of bytes received from grpc clients.",
})
)
func init() {
prometheus.MustRegister(receivedCounter)
prometheus.MustRegister(failedCounter)
prometheus.MustRegister(handlingDuration)
prometheus.MustRegister(sentBytes)
prometheus.MustRegister(receivedBytes)
}

View File

@ -1,4 +1,4 @@
// Copyright 2016 CoreOS, Inc.
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -45,7 +45,7 @@ func (qa *quotaAlarmer) check(ctx context.Context, r interface{}) error {
Alarm: pb.AlarmType_NOSPACE,
}
qa.a.Alarm(ctx, req)
return rpctypes.ErrNoSpace
return rpctypes.ErrGRPCNoSpace
}
func NewQuotaKVServer(s *etcdserver.EtcdServer) pb.KVServer {

View File

@ -0,0 +1,16 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package rpctypes has types and values shared by the etcd server and client for v3 RPC interaction.
package rpctypes

View File

@ -1,4 +1,4 @@
// Copyright 2015 CoreOS, Inc.
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -20,24 +20,131 @@ import (
)
var (
ErrEmptyKey = grpc.Errorf(codes.InvalidArgument, "etcdserver: key is not provided")
ErrTooManyOps = grpc.Errorf(codes.InvalidArgument, "etcdserver: too many operations in txn request")
ErrDuplicateKey = grpc.Errorf(codes.InvalidArgument, "etcdserver: duplicate key given in txn request")
ErrCompacted = grpc.Errorf(codes.OutOfRange, "etcdserver: storage: required revision has been compacted")
ErrFutureRev = grpc.Errorf(codes.OutOfRange, "etcdserver: storage: required revision is a future revision")
ErrNoSpace = grpc.Errorf(codes.ResourceExhausted, "etcdserver: storage: database space exceeded")
// server-side error
ErrGRPCEmptyKey = grpc.Errorf(codes.InvalidArgument, "etcdserver: key is not provided")
ErrGRPCTooManyOps = grpc.Errorf(codes.InvalidArgument, "etcdserver: too many operations in txn request")
ErrGRPCDuplicateKey = grpc.Errorf(codes.InvalidArgument, "etcdserver: duplicate key given in txn request")
ErrGRPCCompacted = grpc.Errorf(codes.OutOfRange, "etcdserver: mvcc: required revision has been compacted")
ErrGRPCFutureRev = grpc.Errorf(codes.OutOfRange, "etcdserver: mvcc: required revision is a future revision")
ErrGRPCNoSpace = grpc.Errorf(codes.ResourceExhausted, "etcdserver: mvcc: database space exceeded")
ErrLeaseNotFound = grpc.Errorf(codes.NotFound, "etcdserver: requested lease not found")
ErrLeaseExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: lease already exists")
ErrGRPCLeaseNotFound = grpc.Errorf(codes.NotFound, "etcdserver: requested lease not found")
ErrGRPCLeaseExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: lease already exists")
ErrMemberExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: member ID already exist")
ErrPeerURLExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: Peer URLs already exists")
ErrMemberBadURLs = grpc.Errorf(codes.InvalidArgument, "etcdserver: given member URLs are invalid")
ErrMemberNotFound = grpc.Errorf(codes.NotFound, "etcdserver: member not found")
ErrGRPCMemberExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: member ID already exist")
ErrGRPCPeerURLExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: Peer URLs already exists")
ErrGRPCMemberBadURLs = grpc.Errorf(codes.InvalidArgument, "etcdserver: given member URLs are invalid")
ErrGRPCMemberNotFound = grpc.Errorf(codes.NotFound, "etcdserver: member not found")
ErrRequestTooLarge = grpc.Errorf(codes.InvalidArgument, "etcdserver: request is too large")
ErrGRPCRequestTooLarge = grpc.Errorf(codes.InvalidArgument, "etcdserver: request is too large")
ErrUserAlreadyExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: user name already exists")
ErrUserNotFound = grpc.Errorf(codes.FailedPrecondition, "etcdserver: user name not found")
ErrRoleAlreadyExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: role name already exists")
ErrGRPCRootUserNotExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: root user does not exist")
ErrGRPCRootRoleNotExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: root user does not have root role")
ErrGRPCUserAlreadyExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: user name already exists")
ErrGRPCUserNotFound = grpc.Errorf(codes.FailedPrecondition, "etcdserver: user name not found")
ErrGRPCRoleAlreadyExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: role name already exists")
ErrGRPCRoleNotFound = grpc.Errorf(codes.FailedPrecondition, "etcdserver: role name not found")
ErrGRPCAuthFailed = grpc.Errorf(codes.InvalidArgument, "etcdserver: authentication failed, invalid user ID or password")
ErrGRPCPermissionDenied = grpc.Errorf(codes.FailedPrecondition, "etcdserver: permission denied")
ErrGRPCRoleNotGranted = grpc.Errorf(codes.FailedPrecondition, "etcdserver: role is not granted to the user")
ErrGRPCPermissionNotGranted = grpc.Errorf(codes.FailedPrecondition, "etcdserver: permission is not granted to the role")
ErrGRPCNoLeader = grpc.Errorf(codes.Unavailable, "etcdserver: no leader")
ErrGRPCNotCapable = grpc.Errorf(codes.Unavailable, "etcdserver: not capable")
ErrGRPCStopped = grpc.Errorf(codes.Unavailable, "etcdserver: server stopped")
errStringToError = map[string]error{
grpc.ErrorDesc(ErrGRPCEmptyKey): ErrGRPCEmptyKey,
grpc.ErrorDesc(ErrGRPCTooManyOps): ErrGRPCTooManyOps,
grpc.ErrorDesc(ErrGRPCDuplicateKey): ErrGRPCDuplicateKey,
grpc.ErrorDesc(ErrGRPCCompacted): ErrGRPCCompacted,
grpc.ErrorDesc(ErrGRPCFutureRev): ErrGRPCFutureRev,
grpc.ErrorDesc(ErrGRPCNoSpace): ErrGRPCNoSpace,
grpc.ErrorDesc(ErrGRPCLeaseNotFound): ErrGRPCLeaseNotFound,
grpc.ErrorDesc(ErrGRPCLeaseExist): ErrGRPCLeaseExist,
grpc.ErrorDesc(ErrGRPCMemberExist): ErrGRPCMemberExist,
grpc.ErrorDesc(ErrGRPCPeerURLExist): ErrGRPCPeerURLExist,
grpc.ErrorDesc(ErrGRPCMemberBadURLs): ErrGRPCMemberBadURLs,
grpc.ErrorDesc(ErrGRPCMemberNotFound): ErrGRPCMemberNotFound,
grpc.ErrorDesc(ErrGRPCRequestTooLarge): ErrGRPCRequestTooLarge,
grpc.ErrorDesc(ErrGRPCRootUserNotExist): ErrGRPCRootUserNotExist,
grpc.ErrorDesc(ErrGRPCRootRoleNotExist): ErrGRPCRootRoleNotExist,
grpc.ErrorDesc(ErrGRPCUserAlreadyExist): ErrGRPCUserAlreadyExist,
grpc.ErrorDesc(ErrGRPCUserNotFound): ErrGRPCUserNotFound,
grpc.ErrorDesc(ErrGRPCRoleAlreadyExist): ErrGRPCRoleAlreadyExist,
grpc.ErrorDesc(ErrGRPCRoleNotFound): ErrGRPCRoleNotFound,
grpc.ErrorDesc(ErrGRPCAuthFailed): ErrGRPCAuthFailed,
grpc.ErrorDesc(ErrGRPCPermissionDenied): ErrGRPCPermissionDenied,
grpc.ErrorDesc(ErrGRPCRoleNotGranted): ErrGRPCRoleNotGranted,
grpc.ErrorDesc(ErrGRPCPermissionNotGranted): ErrGRPCPermissionNotGranted,
grpc.ErrorDesc(ErrGRPCNoLeader): ErrGRPCNoLeader,
grpc.ErrorDesc(ErrGRPCNotCapable): ErrGRPCNotCapable,
grpc.ErrorDesc(ErrGRPCStopped): ErrGRPCStopped,
}
// client-side error
ErrEmptyKey = Error(ErrGRPCEmptyKey)
ErrTooManyOps = Error(ErrGRPCTooManyOps)
ErrDuplicateKey = Error(ErrGRPCDuplicateKey)
ErrCompacted = Error(ErrGRPCCompacted)
ErrFutureRev = Error(ErrGRPCFutureRev)
ErrNoSpace = Error(ErrGRPCNoSpace)
ErrLeaseNotFound = Error(ErrGRPCLeaseNotFound)
ErrLeaseExist = Error(ErrGRPCLeaseExist)
ErrMemberExist = Error(ErrGRPCMemberExist)
ErrPeerURLExist = Error(ErrGRPCPeerURLExist)
ErrMemberBadURLs = Error(ErrGRPCMemberBadURLs)
ErrMemberNotFound = Error(ErrGRPCMemberNotFound)
ErrRequestTooLarge = Error(ErrGRPCRequestTooLarge)
ErrRootUserNotExist = Error(ErrGRPCRootUserNotExist)
ErrRootRoleNotExist = Error(ErrGRPCRootRoleNotExist)
ErrUserAlreadyExist = Error(ErrGRPCUserAlreadyExist)
ErrUserNotFound = Error(ErrGRPCUserNotFound)
ErrRoleAlreadyExist = Error(ErrGRPCRoleAlreadyExist)
ErrRoleNotFound = Error(ErrGRPCRoleNotFound)
ErrAuthFailed = Error(ErrGRPCAuthFailed)
ErrPermissionDenied = Error(ErrGRPCPermissionDenied)
ErrRoleNotGranted = Error(ErrGRPCRoleNotGranted)
ErrPermissionNotGranted = Error(ErrGRPCPermissionNotGranted)
ErrNoLeader = Error(ErrGRPCNoLeader)
ErrNotCapable = Error(ErrGRPCNotCapable)
ErrStopped = Error(ErrGRPCStopped)
)
// EtcdError defines gRPC server errors.
// (https://github.com/grpc/grpc-go/blob/master/rpc_util.go#L319-L323)
type EtcdError struct {
code codes.Code
desc string
}
// Code returns grpc/codes.Code.
// TODO: define clientv3/codes.Code.
func (e EtcdError) Code() codes.Code {
return e.code
}
func (e EtcdError) Error() string {
return e.desc
}
func Error(err error) error {
if err == nil {
return nil
}
verr, ok := errStringToError[grpc.ErrorDesc(err)]
if !ok { // not gRPC error
return err
}
return EtcdError{code: grpc.Code(verr), desc: grpc.ErrorDesc(verr)}
}
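With this table a caller can compare errors by value: Error converts whatever description a gRPC call returned into the matching typed client-side error above. A minimal sketch of call-site handling, written from a caller's point of view importing this rpctypes package:
// sketch: classifying an error returned by a v3 RPC using the helpers above
func classify(err error) string {
	switch rpctypes.Error(err) {
	case rpctypes.ErrCompacted:
		return "requested revision has been compacted"
	case rpctypes.ErrNoSpace:
		return "database quota exceeded"
	case rpctypes.ErrNoLeader:
		return "member has no leader"
	default:
		return "unclassified"
	}
}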

View File

@ -0,0 +1,20 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package rpctypes
var (
MetadataRequireLeaderKey = "hasleader"
MetadataHasLeader = "true"
)

View File

@ -1,4 +1,4 @@
// Copyright 2016 Nippon Telegraph and Telephone Corporation.
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -19,30 +19,45 @@ import (
"github.com/coreos/etcd/etcdserver"
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
"github.com/coreos/etcd/lease"
"github.com/coreos/etcd/storage"
"github.com/coreos/etcd/mvcc"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
)
func togRPCError(err error) error {
switch err {
case storage.ErrCompacted:
return rpctypes.ErrCompacted
case storage.ErrFutureRev:
return rpctypes.ErrFutureRev
case mvcc.ErrCompacted:
return rpctypes.ErrGRPCCompacted
case mvcc.ErrFutureRev:
return rpctypes.ErrGRPCFutureRev
case lease.ErrLeaseNotFound:
return rpctypes.ErrLeaseNotFound
return rpctypes.ErrGRPCLeaseNotFound
// TODO: handle error from raft and timeout
case etcdserver.ErrRequestTooLarge:
return rpctypes.ErrRequestTooLarge
return rpctypes.ErrGRPCRequestTooLarge
case etcdserver.ErrNoSpace:
return rpctypes.ErrNoSpace
return rpctypes.ErrGRPCNoSpace
case auth.ErrRootUserNotExist:
return rpctypes.ErrGRPCRootUserNotExist
case auth.ErrRootRoleNotExist:
return rpctypes.ErrGRPCRootRoleNotExist
case auth.ErrUserAlreadyExist:
return rpctypes.ErrUserAlreadyExist
return rpctypes.ErrGRPCUserAlreadyExist
case auth.ErrUserNotFound:
return rpctypes.ErrUserNotFound
return rpctypes.ErrGRPCUserNotFound
case auth.ErrRoleAlreadyExist:
return rpctypes.ErrRoleAlreadyExist
return rpctypes.ErrGRPCRoleAlreadyExist
case auth.ErrRoleNotFound:
return rpctypes.ErrGRPCRoleNotFound
case auth.ErrAuthFailed:
return rpctypes.ErrGRPCAuthFailed
case auth.ErrPermissionDenied:
return rpctypes.ErrGRPCPermissionDenied
case auth.ErrRoleNotGranted:
return rpctypes.ErrGRPCRoleNotGranted
case auth.ErrPermissionNotGranted:
return rpctypes.ErrGRPCPermissionNotGranted
default:
return grpc.Errorf(codes.Internal, err.Error())
}

View File

@ -1,4 +1,4 @@
// Copyright 2015 CoreOS, Inc.
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -19,17 +19,20 @@ import (
"sync"
"time"
"golang.org/x/net/context"
"github.com/coreos/etcd/etcdserver"
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"github.com/coreos/etcd/storage"
"github.com/coreos/etcd/storage/storagepb"
"github.com/coreos/etcd/mvcc"
"github.com/coreos/etcd/mvcc/mvccpb"
)
type watchServer struct {
clusterID int64
memberID int64
raftTimer etcdserver.RaftTimer
watchable storage.Watchable
watchable mvcc.Watchable
}
func NewWatchServer(s *etcdserver.EtcdServer) pb.WatchServer {
@ -71,7 +74,7 @@ const (
)
// serverWatchStream is an etcd server side stream. It receives requests
// from client side gRPC stream. It receives watch events from storage.WatchStream,
// from client side gRPC stream. It receives watch events from mvcc.WatchStream,
// and creates responses that are forwarded to the gRPC stream.
// It also forwards control messages like watch created and canceled.
type serverWatchStream struct {
@ -80,20 +83,23 @@ type serverWatchStream struct {
raftTimer etcdserver.RaftTimer
gRPCStream pb.Watch_WatchServer
watchStream storage.WatchStream
watchStream mvcc.WatchStream
ctrlStream chan *pb.WatchResponse
// mu protects progress, prevKV
mu sync.Mutex
// progress tracks the watchID that stream might need to send
// progress to.
progress map[storage.WatchID]bool
// mu protects progress
mu sync.Mutex
progress map[mvcc.WatchID]bool
// closec indicates the stream is closed.
closec chan struct{}
// wg waits for the send loop to complete
wg sync.WaitGroup
}
func (ws *watchServer) Watch(stream pb.Watch_WatchServer) error {
func (ws *watchServer) Watch(stream pb.Watch_WatchServer) (err error) {
sws := serverWatchStream{
clusterID: ws.clusterID,
memberID: ws.memberID,
@ -102,16 +108,38 @@ func (ws *watchServer) Watch(stream pb.Watch_WatchServer) error {
watchStream: ws.watchable.NewWatchStream(),
// chan for sending control response like watcher created and canceled.
ctrlStream: make(chan *pb.WatchResponse, ctrlStreamBufLen),
progress: make(map[storage.WatchID]bool),
progress: make(map[mvcc.WatchID]bool),
closec: make(chan struct{}),
}
defer sws.close()
go sws.sendLoop()
return sws.recvLoop()
sws.wg.Add(1)
go func() {
sws.sendLoop()
sws.wg.Done()
}()
errc := make(chan error, 1)
// Ideally recvLoop would also use sws.wg to signal its completion
// but when stream.Context().Done() is closed, the stream's recv
// may continue to block since it uses a different context, leading to
// deadlock when calling sws.close().
go func() { errc <- sws.recvLoop() }()
select {
case err = <-errc:
case <-stream.Context().Done():
err = stream.Context().Err()
// the only server-side cancellation is noleader for now.
if err == context.Canceled {
err = rpctypes.ErrGRPCNoLeader
}
}
sws.close()
return err
}
func (sws *serverWatchStream) recvLoop() error {
defer close(sws.ctrlStream)
for {
req, err := sws.gRPCStream.Recv()
if err == io.EOF {
@ -143,18 +171,25 @@ func (sws *serverWatchStream) recvLoop() error {
}
id := sws.watchStream.Watch(creq.Key, creq.RangeEnd, rev)
if id != -1 && creq.ProgressNotify {
sws.mu.Lock()
sws.progress[id] = true
sws.mu.Unlock()
}
sws.ctrlStream <- &pb.WatchResponse{
wr := &pb.WatchResponse{
Header: sws.newResponseHeader(wsrev),
WatchId: int64(id),
Created: true,
Canceled: id == -1,
}
select {
case sws.ctrlStream <- wr:
case <-sws.closec:
return nil
}
case *pb.WatchRequest_CancelRequest:
if uv.CancelRequest != nil {
id := uv.CancelRequest.WatchId
err := sws.watchStream.Cancel(storage.WatchID(id))
err := sws.watchStream.Cancel(mvcc.WatchID(id))
if err == nil {
sws.ctrlStream <- &pb.WatchResponse{
Header: sws.newResponseHeader(sws.watchStream.Rev()),
@ -162,26 +197,40 @@ func (sws *serverWatchStream) recvLoop() error {
Canceled: true,
}
sws.mu.Lock()
delete(sws.progress, storage.WatchID(id))
delete(sws.progress, mvcc.WatchID(id))
sws.mu.Unlock()
}
}
// TODO: do we need to return error back to client?
default:
panic("not implemented")
// we probably should not shut down the entire stream when
// we receive an invalid command,
// so just do nothing instead.
continue
}
}
}
func (sws *serverWatchStream) sendLoop() {
// watch ids that are currently active
ids := make(map[storage.WatchID]struct{})
ids := make(map[mvcc.WatchID]struct{})
// watch responses pending on a watch id creation message
pending := make(map[storage.WatchID][]*pb.WatchResponse)
pending := make(map[mvcc.WatchID][]*pb.WatchResponse)
interval := GetProgressReportInterval()
progressTicker := time.NewTicker(interval)
defer progressTicker.Stop()
defer func() {
progressTicker.Stop()
// drain the chan to clean up pending events
for ws := range sws.watchStream.Chan() {
mvcc.ReportEventReceived(len(ws.Events))
}
for _, wrs := range pending {
for _, ws := range wrs {
mvcc.ReportEventReceived(len(ws.Events))
}
}
}()
for {
select {
@ -190,11 +239,11 @@ func (sws *serverWatchStream) sendLoop() {
return
}
// TODO: evs is []storagepb.Event type
// either return []*storagepb.Event from storage package
// or define protocol buffer with []storagepb.Event.
// TODO: evs is []mvccpb.Event type
// either return []*mvccpb.Event from the mvcc package
// or define protocol buffer with []mvccpb.Event.
evs := wresp.Events
events := make([]*storagepb.Event, len(evs))
events := make([]*mvccpb.Event, len(evs))
for i := range evs {
events[i] = &evs[i]
}
@ -213,13 +262,14 @@ func (sws *serverWatchStream) sendLoop() {
continue
}
storage.ReportEventReceived()
mvcc.ReportEventReceived(len(evs))
if err := sws.gRPCStream.Send(wr); err != nil {
return
}
sws.mu.Lock()
if _, ok := sws.progress[wresp.WatchID]; ok {
if len(evs) > 0 && sws.progress[wresp.WatchID] {
// elide next progress update if sent a key update
sws.progress[wresp.WatchID] = false
}
sws.mu.Unlock()
@ -234,7 +284,7 @@ func (sws *serverWatchStream) sendLoop() {
}
// track id creation
wid := storage.WatchID(c.WatchId)
wid := mvcc.WatchID(c.WatchId)
if c.Canceled {
delete(ids, wid)
continue
@ -243,7 +293,7 @@ func (sws *serverWatchStream) sendLoop() {
// flush buffered events
ids[wid] = struct{}{}
for _, v := range pending[wid] {
storage.ReportEventReceived()
mvcc.ReportEventReceived(len(v.Events))
if err := sws.gRPCStream.Send(v); err != nil {
return
}
@ -251,22 +301,16 @@ func (sws *serverWatchStream) sendLoop() {
delete(pending, wid)
}
case <-progressTicker.C:
sws.mu.Lock()
for id, ok := range sws.progress {
if ok {
sws.watchStream.RequestProgress(id)
}
sws.progress[id] = true
}
sws.mu.Unlock()
case <-sws.closec:
// drain the chan to clean up pending events
for range sws.watchStream.Chan() {
storage.ReportEventReceived()
}
for _, wrs := range pending {
for range wrs {
storage.ReportEventReceived()
}
}
return
}
}
}
@ -274,7 +318,7 @@ func (sws *serverWatchStream) sendLoop() {
func (sws *serverWatchStream) close() {
sws.watchStream.Close()
close(sws.closec)
close(sws.ctrlStream)
sws.wg.Wait()
}
func (sws *serverWatchStream) newResponseHeader(rev int64) *pb.ResponseHeader {

View File

@ -1,4 +1,4 @@
// Copyright 2016 CoreOS, Inc.
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -18,13 +18,15 @@ import (
"bytes"
"fmt"
"sort"
"time"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"github.com/coreos/etcd/lease"
"github.com/coreos/etcd/mvcc"
"github.com/coreos/etcd/mvcc/mvccpb"
"github.com/coreos/etcd/pkg/types"
dstorage "github.com/coreos/etcd/storage"
"github.com/coreos/etcd/storage/storagepb"
"github.com/gogo/protobuf/proto"
"golang.org/x/net/context"
)
const (
@ -32,6 +34,8 @@ const (
// To apply with independent Range, Put, Delete, you can pass noTxn
// to apply functions instead of a valid txn ID.
noTxn = -1
warnApplyDuration = 10 * time.Millisecond
)
type applyResult struct {
@ -45,54 +49,103 @@ type applyResult struct {
// applierV3 is the interface for processing V3 raft messages
type applierV3 interface {
Apply(r *pb.InternalRaftRequest) *applyResult
Put(txnID int64, p *pb.PutRequest) (*pb.PutResponse, error)
Range(txnID int64, r *pb.RangeRequest) (*pb.RangeResponse, error)
DeleteRange(txnID int64, dr *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error)
Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error)
Compaction(compaction *pb.CompactionRequest) (*pb.CompactionResponse, <-chan struct{}, error)
LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error)
LeaseRevoke(lc *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error)
Alarm(*pb.AlarmRequest) (*pb.AlarmResponse, error)
Authenticate(r *pb.InternalAuthenticateRequest) (*pb.AuthenticateResponse, error)
AuthEnable() (*pb.AuthEnableResponse, error)
AuthDisable() (*pb.AuthDisableResponse, error)
UserAdd(ua *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error)
UserDelete(ua *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error)
UserChangePassword(ua *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error)
UserGrantRole(ua *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error)
UserGet(ua *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error)
UserRevokeRole(ua *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error)
RoleAdd(ua *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error)
RoleGrantPermission(ua *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error)
RoleGet(ua *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error)
RoleRevokePermission(ua *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error)
RoleDelete(ua *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error)
UserList(ua *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error)
RoleList(ua *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error)
}
type applierV3backend struct {
s *EtcdServer
}
func (s *EtcdServer) applyV3Request(r *pb.InternalRaftRequest) *applyResult {
func (s *EtcdServer) newApplierV3() applierV3 {
return newAuthApplierV3(
s.AuthStore(),
newQuotaApplierV3(s, &applierV3backend{s}),
)
}
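
newApplierV3 stacks the auth and quota appliers on top of the backend applier; each layer embeds the applierV3 interface, overrides only the calls it needs, and delegates the rest. A minimal, self-contained sketch of that embedding pattern (illustrative types only, not part of this diff):

package main

import "fmt"

type applier interface {
	Put(key, val string) error
}

type backend struct{}

func (backend) Put(key, val string) error {
	fmt.Printf("put %s=%s\n", key, val)
	return nil
}

// authApplier wraps another applier; the embedded interface supplies every
// method by delegation, so only intercepted calls need to be written out.
type authApplier struct {
	applier
	allowed func(key string) bool
}

func (a *authApplier) Put(key, val string) error {
	if !a.allowed(key) {
		return fmt.Errorf("permission denied for %q", key)
	}
	return a.applier.Put(key, val)
}

func main() {
	var app applier = &authApplier{
		applier: backend{},
		allowed: func(key string) bool { return key != "secret" },
	}
	fmt.Println(app.Put("foo", "bar"))  // delegated to the backend layer
	fmt.Println(app.Put("secret", "x")) // rejected by the auth layer
}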
func (a *applierV3backend) Apply(r *pb.InternalRaftRequest) *applyResult {
ar := &applyResult{}
// call into a.s.applyV3.F instead of a.F so upper appliers can check individual calls
switch {
case r.Range != nil:
ar.resp, ar.err = s.applyV3.Range(noTxn, r.Range)
ar.resp, ar.err = a.s.applyV3.Range(noTxn, r.Range)
case r.Put != nil:
ar.resp, ar.err = s.applyV3.Put(noTxn, r.Put)
ar.resp, ar.err = a.s.applyV3.Put(noTxn, r.Put)
case r.DeleteRange != nil:
ar.resp, ar.err = s.applyV3.DeleteRange(noTxn, r.DeleteRange)
ar.resp, ar.err = a.s.applyV3.DeleteRange(noTxn, r.DeleteRange)
case r.Txn != nil:
ar.resp, ar.err = s.applyV3.Txn(r.Txn)
ar.resp, ar.err = a.s.applyV3.Txn(r.Txn)
case r.Compaction != nil:
ar.resp, ar.physc, ar.err = s.applyV3.Compaction(r.Compaction)
ar.resp, ar.physc, ar.err = a.s.applyV3.Compaction(r.Compaction)
case r.LeaseGrant != nil:
ar.resp, ar.err = s.applyV3.LeaseGrant(r.LeaseGrant)
ar.resp, ar.err = a.s.applyV3.LeaseGrant(r.LeaseGrant)
case r.LeaseRevoke != nil:
ar.resp, ar.err = s.applyV3.LeaseRevoke(r.LeaseRevoke)
ar.resp, ar.err = a.s.applyV3.LeaseRevoke(r.LeaseRevoke)
case r.Alarm != nil:
ar.resp, ar.err = s.applyV3.Alarm(r.Alarm)
ar.resp, ar.err = a.s.applyV3.Alarm(r.Alarm)
case r.Authenticate != nil:
ar.resp, ar.err = a.s.applyV3.Authenticate(r.Authenticate)
case r.AuthEnable != nil:
ar.resp, ar.err = s.applyV3.AuthEnable()
ar.resp, ar.err = a.s.applyV3.AuthEnable()
case r.AuthDisable != nil:
ar.resp, ar.err = a.s.applyV3.AuthDisable()
case r.AuthUserAdd != nil:
ar.resp, ar.err = s.applyV3.UserAdd(r.AuthUserAdd)
ar.resp, ar.err = a.s.applyV3.UserAdd(r.AuthUserAdd)
case r.AuthUserDelete != nil:
ar.resp, ar.err = s.applyV3.UserDelete(r.AuthUserDelete)
ar.resp, ar.err = a.s.applyV3.UserDelete(r.AuthUserDelete)
case r.AuthUserChangePassword != nil:
ar.resp, ar.err = s.applyV3.UserChangePassword(r.AuthUserChangePassword)
ar.resp, ar.err = a.s.applyV3.UserChangePassword(r.AuthUserChangePassword)
case r.AuthUserGrantRole != nil:
ar.resp, ar.err = a.s.applyV3.UserGrantRole(r.AuthUserGrantRole)
case r.AuthUserGet != nil:
ar.resp, ar.err = a.s.applyV3.UserGet(r.AuthUserGet)
case r.AuthUserRevokeRole != nil:
ar.resp, ar.err = a.s.applyV3.UserRevokeRole(r.AuthUserRevokeRole)
case r.AuthRoleAdd != nil:
ar.resp, ar.err = s.applyV3.RoleAdd(r.AuthRoleAdd)
ar.resp, ar.err = a.s.applyV3.RoleAdd(r.AuthRoleAdd)
case r.AuthRoleGrantPermission != nil:
ar.resp, ar.err = a.s.applyV3.RoleGrantPermission(r.AuthRoleGrantPermission)
case r.AuthRoleGet != nil:
ar.resp, ar.err = a.s.applyV3.RoleGet(r.AuthRoleGet)
case r.AuthRoleRevokePermission != nil:
ar.resp, ar.err = a.s.applyV3.RoleRevokePermission(r.AuthRoleRevokePermission)
case r.AuthRoleDelete != nil:
ar.resp, ar.err = a.s.applyV3.RoleDelete(r.AuthRoleDelete)
case r.AuthUserList != nil:
ar.resp, ar.err = a.s.applyV3.UserList(r.AuthUserList)
case r.AuthRoleList != nil:
ar.resp, ar.err = a.s.applyV3.RoleList(r.AuthRoleList)
default:
panic("not implemented")
}
@ -157,8 +210,7 @@ func (a *applierV3backend) Range(txnID int64, r *pb.RangeRequest) (*pb.RangeResp
resp.Header = &pb.ResponseHeader{}
var (
kvs []storagepb.KeyValue
rev int64
rr *mvcc.RangeResult
err error
)
@ -176,13 +228,19 @@ func (a *applierV3backend) Range(txnID int64, r *pb.RangeRequest) (*pb.RangeResp
limit = limit + 1
}
ro := mvcc.RangeOptions{
Limit: limit,
Rev: r.Revision,
Count: r.CountOnly,
}
if txnID != noTxn {
kvs, rev, err = a.s.KV().TxnRange(txnID, r.Key, r.RangeEnd, limit, r.Revision)
rr, err = a.s.KV().TxnRange(txnID, r.Key, r.RangeEnd, ro)
if err != nil {
return nil, err
}
} else {
kvs, rev, err = a.s.KV().Range(r.Key, r.RangeEnd, limit, r.Revision)
rr, err = a.s.KV().Range(r.Key, r.RangeEnd, ro)
if err != nil {
return nil, err
}
@ -192,15 +250,15 @@ func (a *applierV3backend) Range(txnID int64, r *pb.RangeRequest) (*pb.RangeResp
var sorter sort.Interface
switch {
case r.SortTarget == pb.RangeRequest_KEY:
sorter = &kvSortByKey{&kvSort{kvs}}
sorter = &kvSortByKey{&kvSort{rr.KVs}}
case r.SortTarget == pb.RangeRequest_VERSION:
sorter = &kvSortByVersion{&kvSort{kvs}}
sorter = &kvSortByVersion{&kvSort{rr.KVs}}
case r.SortTarget == pb.RangeRequest_CREATE:
sorter = &kvSortByCreate{&kvSort{kvs}}
sorter = &kvSortByCreate{&kvSort{rr.KVs}}
case r.SortTarget == pb.RangeRequest_MOD:
sorter = &kvSortByMod{&kvSort{kvs}}
sorter = &kvSortByMod{&kvSort{rr.KVs}}
case r.SortTarget == pb.RangeRequest_VALUE:
sorter = &kvSortByValue{&kvSort{kvs}}
sorter = &kvSortByValue{&kvSort{rr.KVs}}
}
switch {
case r.SortOrder == pb.RangeRequest_ASCEND:
@ -210,29 +268,31 @@ func (a *applierV3backend) Range(txnID int64, r *pb.RangeRequest) (*pb.RangeResp
}
}
if r.Limit > 0 && len(kvs) > int(r.Limit) {
kvs = kvs[:r.Limit]
if r.Limit > 0 && len(rr.KVs) > int(r.Limit) {
rr.KVs = rr.KVs[:r.Limit]
resp.More = true
}
resp.Header.Revision = rev
for i := range kvs {
resp.Kvs = append(resp.Kvs, &kvs[i])
resp.Header.Revision = rr.Rev
resp.Count = int64(rr.Count)
for i := range rr.KVs {
if r.KeysOnly {
rr.KVs[i].Value = nil
}
resp.Kvs = append(resp.Kvs, &rr.KVs[i])
}
return resp, nil
}
func (a *applierV3backend) Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error) {
var revision int64
ok := true
for _, c := range rt.Compare {
if revision, ok = a.applyCompare(c); !ok {
if _, ok = a.applyCompare(c); !ok {
break
}
}
var reqs []*pb.RequestUnion
var reqs []*pb.RequestOp
if ok {
reqs = rt.Success
} else {
@ -246,6 +306,8 @@ func (a *applierV3backend) Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error) {
return nil, err
}
revision := a.s.KV().Rev()
// When executing the operations of txn, we need to hold the txn lock.
// So the reader will not see any intermediate results.
txnID := a.s.KV().TxnBegin()
@ -256,12 +318,16 @@ func (a *applierV3backend) Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error) {
}
}()
resps := make([]*pb.ResponseUnion, len(reqs))
resps := make([]*pb.ResponseOp, len(reqs))
changedKV := false
for i := range reqs {
if reqs[i].GetRequestRange() == nil {
changedKV = true
}
resps[i] = a.applyUnion(txnID, reqs[i])
}
if len(resps) != 0 {
if changedKV {
revision += 1
}
@ -277,16 +343,18 @@ func (a *applierV3backend) Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error) {
// It returns the revision at which the comparison happens. If the comparison
// succeeds, then it returns true. Otherwise it returns false.
func (a *applierV3backend) applyCompare(c *pb.Compare) (int64, bool) {
ckvs, rev, err := a.s.KV().Range(c.Key, nil, 1, 0)
rr, err := a.s.KV().Range(c.Key, nil, mvcc.RangeOptions{})
rev := rr.Rev
if err != nil {
if err == dstorage.ErrTxnIDMismatch {
if err == mvcc.ErrTxnIDMismatch {
panic("unexpected txn ID mismatch error")
}
return rev, false
}
var ckv storagepb.KeyValue
if len(ckvs) != 0 {
ckv = ckvs[0]
var ckv mvccpb.KeyValue
if len(rr.KVs) != 0 {
ckv = rr.KVs[0]
} else {
// Use the zero value of ckv normally. However...
if c.Target == pb.Compare_VALUE {
@ -341,31 +409,31 @@ func (a *applierV3backend) applyCompare(c *pb.Compare) (int64, bool) {
return rev, true
}
func (a *applierV3backend) applyUnion(txnID int64, union *pb.RequestUnion) *pb.ResponseUnion {
func (a *applierV3backend) applyUnion(txnID int64, union *pb.RequestOp) *pb.ResponseOp {
switch tv := union.Request.(type) {
case *pb.RequestUnion_RequestRange:
case *pb.RequestOp_RequestRange:
if tv.RequestRange != nil {
resp, err := a.Range(txnID, tv.RequestRange)
if err != nil {
panic("unexpected error during txn")
}
return &pb.ResponseUnion{Response: &pb.ResponseUnion_ResponseRange{ResponseRange: resp}}
return &pb.ResponseOp{Response: &pb.ResponseOp_ResponseRange{ResponseRange: resp}}
}
case *pb.RequestUnion_RequestPut:
case *pb.RequestOp_RequestPut:
if tv.RequestPut != nil {
resp, err := a.Put(txnID, tv.RequestPut)
if err != nil {
panic("unexpected error during txn")
}
return &pb.ResponseUnion{Response: &pb.ResponseUnion_ResponsePut{ResponsePut: resp}}
return &pb.ResponseOp{Response: &pb.ResponseOp_ResponsePut{ResponsePut: resp}}
}
case *pb.RequestUnion_RequestDeleteRange:
case *pb.RequestOp_RequestDeleteRange:
if tv.RequestDeleteRange != nil {
resp, err := a.DeleteRange(txnID, tv.RequestDeleteRange)
if err != nil {
panic("unexpected error during txn")
}
return &pb.ResponseUnion{Response: &pb.ResponseUnion_ResponseDeleteRange{ResponseDeleteRange: resp}}
return &pb.ResponseOp{Response: &pb.ResponseOp_ResponseDeleteRange{ResponseDeleteRange: resp}}
}
default:
// empty union
@ -383,7 +451,8 @@ func (a *applierV3backend) Compaction(compaction *pb.CompactionRequest) (*pb.Com
return nil, ch, err
}
// get the current revision. which key to get is not important.
_, resp.Header.Revision, _ = a.s.KV().Range([]byte("compaction"), nil, 1, 0)
rr, _ := a.s.KV().Range([]byte("compaction"), nil, mvcc.RangeOptions{})
resp.Header.Revision = rr.Rev
return resp, ch, err
}
@ -393,13 +462,15 @@ func (a *applierV3backend) LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantR
if err == nil {
resp.ID = int64(l.ID)
resp.TTL = l.TTL
resp.Header = &pb.ResponseHeader{Revision: a.s.KV().Rev()}
}
return resp, err
}
func (a *applierV3backend) LeaseRevoke(lc *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) {
err := a.s.lessor.Revoke(lease.LeaseID(lc.ID))
return &pb.LeaseRevokeResponse{}, err
return &pb.LeaseRevokeResponse{Header: &pb.ResponseHeader{Revision: a.s.KV().Rev()}}, err
}
func (a *applierV3backend) Alarm(ar *pb.AlarmRequest) (*pb.AlarmResponse, error) {
@ -441,7 +512,7 @@ func (a *applierV3backend) Alarm(ar *pb.AlarmRequest) (*pb.AlarmResponse, error)
switch m.Alarm {
case pb.AlarmType_NOSPACE:
plog.Infof("alarm disarmed %+v", ar)
a.s.applyV3 = newQuotaApplierV3(a.s, &applierV3backend{a.s})
a.s.applyV3 = a.s.newApplierV3()
default:
plog.Errorf("unimplemented alarm deactivation (%+v)", m)
}
@ -476,10 +547,23 @@ func (a *applierV3Capped) LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantRe
}
func (a *applierV3backend) AuthEnable() (*pb.AuthEnableResponse, error) {
a.s.AuthStore().AuthEnable()
err := a.s.AuthStore().AuthEnable()
if err != nil {
return nil, err
}
return &pb.AuthEnableResponse{}, nil
}
func (a *applierV3backend) AuthDisable() (*pb.AuthDisableResponse, error) {
a.s.AuthStore().AuthDisable()
return &pb.AuthDisableResponse{}, nil
}
func (a *applierV3backend) Authenticate(r *pb.InternalAuthenticateRequest) (*pb.AuthenticateResponse, error) {
ctx := context.WithValue(context.WithValue(context.TODO(), "index", a.s.consistIndex.ConsistentIndex()), "simpleToken", r.SimpleToken)
return a.s.AuthStore().Authenticate(ctx, r.Name, r.Password)
}
func (a *applierV3backend) UserAdd(r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) {
return a.s.AuthStore().UserAdd(r)
}
@ -492,10 +576,46 @@ func (a *applierV3backend) UserChangePassword(r *pb.AuthUserChangePasswordReques
return a.s.AuthStore().UserChangePassword(r)
}
func (a *applierV3backend) UserGrantRole(r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) {
return a.s.AuthStore().UserGrantRole(r)
}
func (a *applierV3backend) UserGet(r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) {
return a.s.AuthStore().UserGet(r)
}
func (a *applierV3backend) UserRevokeRole(r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) {
return a.s.AuthStore().UserRevokeRole(r)
}
func (a *applierV3backend) RoleAdd(r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) {
return a.s.AuthStore().RoleAdd(r)
}
func (a *applierV3backend) RoleGrantPermission(r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) {
return a.s.AuthStore().RoleGrantPermission(r)
}
func (a *applierV3backend) RoleGet(r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) {
return a.s.AuthStore().RoleGet(r)
}
func (a *applierV3backend) RoleRevokePermission(r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) {
return a.s.AuthStore().RoleRevokePermission(r)
}
func (a *applierV3backend) RoleDelete(r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) {
return a.s.AuthStore().RoleDelete(r)
}
func (a *applierV3backend) UserList(r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) {
return a.s.AuthStore().UserList(r)
}
func (a *applierV3backend) RoleList(r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) {
return a.s.AuthStore().RoleList(r)
}
type quotaApplierV3 struct {
applierV3
q Quota
@ -532,7 +652,7 @@ func (a *quotaApplierV3) LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantRes
return resp, err
}
type kvSort struct{ kvs []storagepb.KeyValue }
type kvSort struct{ kvs []mvccpb.KeyValue }
func (s *kvSort) Swap(i, j int) {
t := s.kvs[i]
@ -571,9 +691,9 @@ func (s *kvSortByValue) Less(i, j int) bool {
return bytes.Compare(s.kvs[i].Value, s.kvs[j].Value) < 0
}
func (a *applierV3backend) checkRequestLeases(reqs []*pb.RequestUnion) error {
func (a *applierV3backend) checkRequestLeases(reqs []*pb.RequestOp) error {
for _, requ := range reqs {
tv, ok := requ.Request.(*pb.RequestUnion_RequestPut)
tv, ok := requ.Request.(*pb.RequestOp_RequestPut)
if !ok {
continue
}
@ -588,9 +708,9 @@ func (a *applierV3backend) checkRequestLeases(reqs []*pb.RequestUnion) error {
return nil
}
func (a *applierV3backend) checkRequestRange(reqs []*pb.RequestUnion) error {
func (a *applierV3backend) checkRequestRange(reqs []*pb.RequestOp) error {
for _, requ := range reqs {
tv, ok := requ.Request.(*pb.RequestUnion_RequestRange)
tv, ok := requ.Request.(*pb.RequestOp_RequestRange)
if !ok {
continue
}
@ -600,10 +720,10 @@ func (a *applierV3backend) checkRequestRange(reqs []*pb.RequestUnion) error {
}
if greq.Revision > a.s.KV().Rev() {
return dstorage.ErrFutureRev
return mvcc.ErrFutureRev
}
if greq.Revision < a.s.KV().FirstRev() {
return dstorage.ErrCompacted
return mvcc.ErrCompacted
}
}
return nil

vendor/github.com/coreos/etcd/etcdserver/apply_auth.go generated vendored Normal file
View File

@ -0,0 +1,163 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package etcdserver
import (
"sync"
"github.com/coreos/etcd/auth"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
)
type authApplierV3 struct {
applierV3
as auth.AuthStore
// mu serializes Apply so that the user field isn't corrupted and so that
// serialized requests don't leak data from TOCTOU errors
mu sync.Mutex
user string
}
func newAuthApplierV3(as auth.AuthStore, base applierV3) *authApplierV3 {
return &authApplierV3{applierV3: base, as: as}
}
func (aa *authApplierV3) Apply(r *pb.InternalRaftRequest) *applyResult {
aa.mu.Lock()
defer aa.mu.Unlock()
if r.Header != nil {
// backward-compatible with pre-3.0 releases when internalRaftRequest
// does not have header field
aa.user = r.Header.Username
}
if needAdminPermission(r) && !aa.as.IsAdminPermitted(aa.user) {
aa.user = ""
return &applyResult{err: auth.ErrPermissionDenied}
}
ret := aa.applierV3.Apply(r)
aa.user = ""
return ret
}
func (aa *authApplierV3) Put(txnID int64, r *pb.PutRequest) (*pb.PutResponse, error) {
if !aa.as.IsPutPermitted(aa.user, r.Key) {
return nil, auth.ErrPermissionDenied
}
return aa.applierV3.Put(txnID, r)
}
func (aa *authApplierV3) Range(txnID int64, r *pb.RangeRequest) (*pb.RangeResponse, error) {
if !aa.as.IsRangePermitted(aa.user, r.Key, r.RangeEnd) {
return nil, auth.ErrPermissionDenied
}
return aa.applierV3.Range(txnID, r)
}
func (aa *authApplierV3) DeleteRange(txnID int64, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) {
if !aa.as.IsDeleteRangePermitted(aa.user, r.Key, r.RangeEnd) {
return nil, auth.ErrPermissionDenied
}
return aa.applierV3.DeleteRange(txnID, r)
}
func (aa *authApplierV3) checkTxnReqsPermission(reqs []*pb.RequestOp) bool {
for _, requ := range reqs {
switch tv := requ.Request.(type) {
case *pb.RequestOp_RequestRange:
if tv.RequestRange == nil {
continue
}
if !aa.as.IsRangePermitted(aa.user, tv.RequestRange.Key, tv.RequestRange.RangeEnd) {
return false
}
case *pb.RequestOp_RequestPut:
if tv.RequestPut == nil {
continue
}
if !aa.as.IsPutPermitted(aa.user, tv.RequestPut.Key) {
return false
}
case *pb.RequestOp_RequestDeleteRange:
if tv.RequestDeleteRange == nil {
continue
}
if !aa.as.IsDeleteRangePermitted(aa.user, tv.RequestDeleteRange.Key, tv.RequestDeleteRange.RangeEnd) {
return false
}
}
}
return true
}
func (aa *authApplierV3) Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error) {
for _, c := range rt.Compare {
if !aa.as.IsRangePermitted(aa.user, c.Key, nil) {
return nil, auth.ErrPermissionDenied
}
}
if !aa.checkTxnReqsPermission(rt.Success) {
return nil, auth.ErrPermissionDenied
}
if !aa.checkTxnReqsPermission(rt.Failure) {
return nil, auth.ErrPermissionDenied
}
return aa.applierV3.Txn(rt)
}
func needAdminPermission(r *pb.InternalRaftRequest) bool {
switch {
case r.AuthEnable != nil:
return true
case r.AuthDisable != nil:
return true
case r.AuthUserAdd != nil:
return true
case r.AuthUserDelete != nil:
return true
case r.AuthUserChangePassword != nil:
return true
case r.AuthUserGrantRole != nil:
return true
case r.AuthUserGet != nil:
return true
case r.AuthUserRevokeRole != nil:
return true
case r.AuthRoleAdd != nil:
return true
case r.AuthRoleGrantPermission != nil:
return true
case r.AuthRoleGet != nil:
return true
case r.AuthRoleRevokePermission != nil:
return true
case r.AuthRoleDelete != nil:
return true
case r.AuthUserList != nil:
return true
case r.AuthRoleList != nil:
return true
default:
return false
}
}

vendor/github.com/coreos/etcd/etcdserver/apply_v2.go generated vendored Normal file
View File

@ -0,0 +1,140 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package etcdserver
import (
"encoding/json"
"path"
"time"
"github.com/coreos/etcd/etcdserver/api"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"github.com/coreos/etcd/etcdserver/membership"
"github.com/coreos/etcd/pkg/pbutil"
"github.com/coreos/etcd/store"
"github.com/coreos/go-semver/semver"
)
// ApplierV2 is the interface for processing V2 raft messages
type ApplierV2 interface {
Delete(r *pb.Request) Response
Post(r *pb.Request) Response
Put(r *pb.Request) Response
QGet(r *pb.Request) Response
Sync(r *pb.Request) Response
}
func NewApplierV2(s store.Store, c *membership.RaftCluster) ApplierV2 {
return &applierV2store{store: s, cluster: c}
}
type applierV2store struct {
store store.Store
cluster *membership.RaftCluster
}
func (a *applierV2store) Delete(r *pb.Request) Response {
switch {
case r.PrevIndex > 0 || r.PrevValue != "":
return toResponse(a.store.CompareAndDelete(r.Path, r.PrevValue, r.PrevIndex))
default:
return toResponse(a.store.Delete(r.Path, r.Dir, r.Recursive))
}
}
func (a *applierV2store) Post(r *pb.Request) Response {
return toResponse(a.store.Create(r.Path, r.Dir, r.Val, true, toTTLOptions(r)))
}
func (a *applierV2store) Put(r *pb.Request) Response {
ttlOptions := toTTLOptions(r)
exists, existsSet := pbutil.GetBool(r.PrevExist)
switch {
case existsSet:
if exists {
if r.PrevIndex == 0 && r.PrevValue == "" {
return toResponse(a.store.Update(r.Path, r.Val, ttlOptions))
}
return toResponse(a.store.CompareAndSwap(r.Path, r.PrevValue, r.PrevIndex, r.Val, ttlOptions))
}
return toResponse(a.store.Create(r.Path, r.Dir, r.Val, false, ttlOptions))
case r.PrevIndex > 0 || r.PrevValue != "":
return toResponse(a.store.CompareAndSwap(r.Path, r.PrevValue, r.PrevIndex, r.Val, ttlOptions))
default:
if storeMemberAttributeRegexp.MatchString(r.Path) {
id := membership.MustParseMemberIDFromKey(path.Dir(r.Path))
var attr membership.Attributes
if err := json.Unmarshal([]byte(r.Val), &attr); err != nil {
plog.Panicf("unmarshal %s should never fail: %v", r.Val, err)
}
if a.cluster != nil {
a.cluster.UpdateAttributes(id, attr)
}
// return an empty response since there is no consumer.
return Response{}
}
if r.Path == membership.StoreClusterVersionKey() {
if a.cluster != nil {
a.cluster.SetVersion(semver.Must(semver.NewVersion(r.Val)), api.UpdateCapability)
}
// return an empty response since there is no consumer.
return Response{}
}
return toResponse(a.store.Set(r.Path, r.Dir, r.Val, ttlOptions))
}
}
func (a *applierV2store) QGet(r *pb.Request) Response {
return toResponse(a.store.Get(r.Path, r.Recursive, r.Sorted))
}
func (a *applierV2store) Sync(r *pb.Request) Response {
a.store.DeleteExpiredKeys(time.Unix(0, r.Time))
return Response{}
}
// applyV2Request interprets r as a call to store.X and returns a Response interpreted
// from store.Event
func (s *EtcdServer) applyV2Request(r *pb.Request) Response {
toTTLOptions(r)
switch r.Method {
case "POST":
return s.applyV2.Post(r)
case "PUT":
return s.applyV2.Put(r)
case "DELETE":
return s.applyV2.Delete(r)
case "QGET":
return s.applyV2.QGet(r)
case "SYNC":
return s.applyV2.Sync(r)
default:
// This should never be reached, but just in case:
return Response{err: ErrUnknownMethod}
}
}
func toTTLOptions(r *pb.Request) store.TTLOptionSet {
refresh, _ := pbutil.GetBool(r.Refresh)
ttlOptions := store.TTLOptionSet{Refresh: refresh}
if r.Expiration != 0 {
ttlOptions.ExpireTime = time.Unix(0, r.Expiration)
}
return ttlOptions
}
func toResponse(ev *store.Event, err error) Response {
return Response{Event: ev, err: err}
}
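
The PrevExist/PrevValue/PrevIndex dispatch above is what backs the v2 API's conditional writes. Assuming an etcd member listening on 127.0.0.1:2379, a client-side sketch using the v2 client package might look like this (illustrative only, not part of this diff):

package main

import (
	"fmt"
	"time"

	"github.com/coreos/etcd/client"
	"golang.org/x/net/context"
)

func main() {
	c, err := client.New(client.Config{
		Endpoints:               []string{"http://127.0.0.1:2379"},
		HeaderTimeoutPerRequest: time.Second,
	})
	if err != nil {
		panic(err)
	}
	kapi := client.NewKeysAPI(c)

	// PrevExist=false maps to store.Create above: fail if the key already exists.
	if _, err := kapi.Set(context.Background(), "/foo", "bar",
		&client.SetOptions{PrevExist: client.PrevNoExist}); err != nil {
		panic(err)
	}

	// PrevValue maps to store.CompareAndSwap above.
	resp, err := kapi.Set(context.Background(), "/foo", "baz",
		&client.SetOptions{PrevValue: "bar"})
	fmt.Println(resp, err)
}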

View File

@ -1,4 +1,4 @@
// Copyright 2015 CoreOS, Inc.
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -281,13 +281,7 @@ func (s *store) UpdateUser(user User) (User, error) {
return old, err
}
hash, err := s.HashPassword(user.Password)
if err != nil {
return old, err
}
user.Password = hash
newUser, err := old.merge(user)
newUser, err := old.merge(user, s.PasswordStore)
if err != nil {
return old, err
}
@ -335,11 +329,7 @@ func (s *store) GetRole(name string) (Role, error) {
}
var r Role
err = json.Unmarshal([]byte(*resp.Event.Node.Value), &r)
if err != nil {
return r, err
}
return r, nil
return r, err
}
func (s *store) CreateRole(role Role) error {
@ -452,29 +442,33 @@ func (s *store) DisableAuth() error {
// is called and returns a new User with these modifications applied. Think of
// all Users as immutable sets of data. Merge allows you to perform the set
// operations (desired grants and revokes) atomically
func (u User) merge(n User) (User, error) {
func (ou User) merge(nu User, s PasswordStore) (User, error) {
var out User
if u.User != n.User {
return out, authErr(http.StatusConflict, "Merging user data with conflicting usernames: %s %s", u.User, n.User)
if ou.User != nu.User {
return out, authErr(http.StatusConflict, "Merging user data with conflicting usernames: %s %s", ou.User, nu.User)
}
out.User = u.User
if n.Password != "" {
out.Password = n.Password
out.User = ou.User
if nu.Password != "" {
hash, err := s.HashPassword(nu.Password)
if err != nil {
return ou, err
}
out.Password = hash
} else {
out.Password = u.Password
out.Password = ou.Password
}
currentRoles := types.NewUnsafeSet(u.Roles...)
for _, g := range n.Grant {
currentRoles := types.NewUnsafeSet(ou.Roles...)
for _, g := range nu.Grant {
if currentRoles.Contains(g) {
plog.Noticef("granting duplicate role %s for user %s", g, n.User)
return User{}, authErr(http.StatusConflict, fmt.Sprintf("Granting duplicate role %s for user %s", g, n.User))
plog.Noticef("granting duplicate role %s for user %s", g, nu.User)
return User{}, authErr(http.StatusConflict, fmt.Sprintf("Granting duplicate role %s for user %s", g, nu.User))
}
currentRoles.Add(g)
}
for _, r := range n.Revoke {
for _, r := range nu.Revoke {
if !currentRoles.Contains(r) {
plog.Noticef("revoking ungranted role %s for user %s", r, n.User)
return User{}, authErr(http.StatusConflict, fmt.Sprintf("Revoking ungranted role %s for user %s", r, n.User))
plog.Noticef("revoking ungranted role %s for user %s", r, nu.User)
return User{}, authErr(http.StatusConflict, fmt.Sprintf("Revoking ungranted role %s for user %s", r, nu.User))
}
currentRoles.Remove(r)
}
@ -497,10 +491,7 @@ func (r Role) merge(n Role) (Role, error) {
return out, err
}
out.Permissions, err = out.Permissions.Revoke(n.Revoke)
if err != nil {
return out, err
}
return out, nil
return out, err
}
func (r Role) HasKeyAccess(key string, write bool) bool {

View File

@ -1,4 +1,4 @@
// Copyright 2015 CoreOS, Inc.
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.

View File

@ -1,4 +1,4 @@
// Copyright 2015 CoreOS, Inc.
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -73,6 +73,7 @@ func getClusterFromRemotePeers(urls []string, timeout time.Duration, logerr bool
continue
}
b, err := ioutil.ReadAll(resp.Body)
resp.Body.Close()
if err != nil {
if logerr {
plog.Warningf("could not read the body of cluster response: %v", err)

View File

@ -1,4 +1,4 @@
// Copyright 2015 CoreOS, Inc.
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -56,6 +56,9 @@ type ServerConfig struct {
StrictReconfigCheck bool
EnablePprof bool
// ClientCertAuthEnabled is true when cert has been signed by the client CA.
ClientCertAuthEnabled bool
}
// VerifyBootstrap sanity-checks the initial config for bootstrap case

View File

@ -1,4 +1,4 @@
// Copyright 2015 CoreOS, Inc.
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -14,12 +14,20 @@
package etcdserver
import (
"sync/atomic"
)
// consistentIndex represents the offset of an entry in a consistent replica log.
// It implements the storage.ConsistentIndexGetter interface.
// It implements the mvcc.ConsistentIndexGetter interface.
// It is always set to the offset of current entry before executing the entry,
// so ConsistentWatchableKV could get the consistent index from it.
type consistentIndex uint64
func (i *consistentIndex) setConsistentIndex(v uint64) { *i = consistentIndex(v) }
func (i *consistentIndex) setConsistentIndex(v uint64) {
atomic.StoreUint64((*uint64)(i), v)
}
func (i *consistentIndex) ConsistentIndex() uint64 { return uint64(*i) }
func (i *consistentIndex) ConsistentIndex() uint64 {
return atomic.LoadUint64((*uint64)(i))
}
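
The change above routes all access to the index through sync/atomic, so the apply path can read it while other goroutines update it. A self-contained sketch of the same pattern:

package main

import (
	"fmt"
	"sync/atomic"
)

// index follows the consistentIndex pattern above: a named uint64 whose
// accessors go through sync/atomic so concurrent readers never see a torn value.
type index uint64

func (i *index) set(v uint64) { atomic.StoreUint64((*uint64)(i), v) }
func (i *index) get() uint64  { return atomic.LoadUint64((*uint64)(i)) }

func main() {
	var i index
	i.set(42)
	fmt.Println(i.get()) // 42
}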

View File

@ -1,4 +1,4 @@
// Copyright 2015 CoreOS, Inc.
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.

View File

@ -1,4 +1,4 @@
// Copyright 2015 CoreOS, Inc.
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -30,6 +30,7 @@ var (
ErrNoLeader = errors.New("etcdserver: no leader")
ErrRequestTooLarge = errors.New("etcdserver: request is too large")
ErrNoSpace = errors.New("etcdserver: no space")
ErrInvalidAuthToken = errors.New("etcdserver: invalid auth token")
)
type DiscoveryError struct {

View File

@ -13,8 +13,10 @@
It has these top-level messages:
Request
Metadata
RequestHeader
InternalRaftRequest
EmptyResponse
InternalAuthenticateRequest
ResponseHeader
RangeRequest
RangeResponse
@ -22,8 +24,8 @@
PutResponse
DeleteRangeRequest
DeleteRangeResponse
RequestUnion
ResponseUnion
RequestOp
ResponseOp
Compare
TxnRequest
TxnResponse
@ -31,6 +33,8 @@
CompactionResponse
HashRequest
HashResponse
SnapshotRequest
SnapshotResponse
WatchRequest
WatchCreateRequest
WatchCancelRequest
@ -64,13 +68,15 @@
AuthUserGetRequest
AuthUserDeleteRequest
AuthUserChangePasswordRequest
AuthUserGrantRequest
AuthUserRevokeRequest
AuthUserGrantRoleRequest
AuthUserRevokeRoleRequest
AuthRoleAddRequest
AuthRoleGetRequest
AuthUserListRequest
AuthRoleListRequest
AuthRoleDeleteRequest
AuthRoleGrantRequest
AuthRoleRevokeRequest
AuthRoleGrantPermissionRequest
AuthRoleRevokePermissionRequest
AuthEnableResponse
AuthDisableResponse
AuthenticateResponse
@ -78,20 +84,22 @@
AuthUserGetResponse
AuthUserDeleteResponse
AuthUserChangePasswordResponse
AuthUserGrantResponse
AuthUserRevokeResponse
AuthUserGrantRoleResponse
AuthUserRevokeRoleResponse
AuthRoleAddResponse
AuthRoleGetResponse
AuthRoleListResponse
AuthUserListResponse
AuthRoleDeleteResponse
AuthRoleGrantResponse
AuthRoleRevokeResponse
AuthRoleGrantPermissionResponse
AuthRoleRevokePermissionResponse
*/
package etcdserverpb
import (
"fmt"
proto "github.com/gogo/protobuf/proto"
proto "github.com/golang/protobuf/proto"
math "math"
)
@ -103,40 +111,46 @@ var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
const _ = proto.ProtoPackageIsVersion1
type Request struct {
ID uint64 `protobuf:"varint,1,opt,name=ID" json:"ID"`
Method string `protobuf:"bytes,2,opt,name=Method" json:"Method"`
Path string `protobuf:"bytes,3,opt,name=Path" json:"Path"`
Val string `protobuf:"bytes,4,opt,name=Val" json:"Val"`
Dir bool `protobuf:"varint,5,opt,name=Dir" json:"Dir"`
PrevValue string `protobuf:"bytes,6,opt,name=PrevValue" json:"PrevValue"`
PrevIndex uint64 `protobuf:"varint,7,opt,name=PrevIndex" json:"PrevIndex"`
PrevExist *bool `protobuf:"varint,8,opt,name=PrevExist" json:"PrevExist,omitempty"`
Expiration int64 `protobuf:"varint,9,opt,name=Expiration" json:"Expiration"`
Wait bool `protobuf:"varint,10,opt,name=Wait" json:"Wait"`
Since uint64 `protobuf:"varint,11,opt,name=Since" json:"Since"`
Recursive bool `protobuf:"varint,12,opt,name=Recursive" json:"Recursive"`
Sorted bool `protobuf:"varint,13,opt,name=Sorted" json:"Sorted"`
Quorum bool `protobuf:"varint,14,opt,name=Quorum" json:"Quorum"`
Time int64 `protobuf:"varint,15,opt,name=Time" json:"Time"`
Stream bool `protobuf:"varint,16,opt,name=Stream" json:"Stream"`
Refresh *bool `protobuf:"varint,17,opt,name=Refresh" json:"Refresh,omitempty"`
ID uint64 `protobuf:"varint,1,opt,name=ID,json=iD" json:"ID"`
Method string `protobuf:"bytes,2,opt,name=Method,json=method" json:"Method"`
Path string `protobuf:"bytes,3,opt,name=Path,json=path" json:"Path"`
Val string `protobuf:"bytes,4,opt,name=Val,json=val" json:"Val"`
Dir bool `protobuf:"varint,5,opt,name=Dir,json=dir" json:"Dir"`
PrevValue string `protobuf:"bytes,6,opt,name=PrevValue,json=prevValue" json:"PrevValue"`
PrevIndex uint64 `protobuf:"varint,7,opt,name=PrevIndex,json=prevIndex" json:"PrevIndex"`
PrevExist *bool `protobuf:"varint,8,opt,name=PrevExist,json=prevExist" json:"PrevExist,omitempty"`
Expiration int64 `protobuf:"varint,9,opt,name=Expiration,json=expiration" json:"Expiration"`
Wait bool `protobuf:"varint,10,opt,name=Wait,json=wait" json:"Wait"`
Since uint64 `protobuf:"varint,11,opt,name=Since,json=since" json:"Since"`
Recursive bool `protobuf:"varint,12,opt,name=Recursive,json=recursive" json:"Recursive"`
Sorted bool `protobuf:"varint,13,opt,name=Sorted,json=sorted" json:"Sorted"`
Quorum bool `protobuf:"varint,14,opt,name=Quorum,json=quorum" json:"Quorum"`
Time int64 `protobuf:"varint,15,opt,name=Time,json=time" json:"Time"`
Stream bool `protobuf:"varint,16,opt,name=Stream,json=stream" json:"Stream"`
Refresh *bool `protobuf:"varint,17,opt,name=Refresh,json=refresh" json:"Refresh,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *Request) Reset() { *m = Request{} }
func (m *Request) String() string { return proto.CompactTextString(m) }
func (*Request) ProtoMessage() {}
func (m *Request) Reset() { *m = Request{} }
func (m *Request) String() string { return proto.CompactTextString(m) }
func (*Request) ProtoMessage() {}
func (*Request) Descriptor() ([]byte, []int) { return fileDescriptorEtcdserver, []int{0} }
type Metadata struct {
NodeID uint64 `protobuf:"varint,1,opt,name=NodeID" json:"NodeID"`
ClusterID uint64 `protobuf:"varint,2,opt,name=ClusterID" json:"ClusterID"`
NodeID uint64 `protobuf:"varint,1,opt,name=NodeID,json=nodeID" json:"NodeID"`
ClusterID uint64 `protobuf:"varint,2,opt,name=ClusterID,json=clusterID" json:"ClusterID"`
XXX_unrecognized []byte `json:"-"`
}
func (m *Metadata) Reset() { *m = Metadata{} }
func (m *Metadata) String() string { return proto.CompactTextString(m) }
func (*Metadata) ProtoMessage() {}
func (m *Metadata) Reset() { *m = Metadata{} }
func (m *Metadata) String() string { return proto.CompactTextString(m) }
func (*Metadata) ProtoMessage() {}
func (*Metadata) Descriptor() ([]byte, []int) { return fileDescriptorEtcdserver, []int{1} }
func init() {
proto.RegisterType((*Request)(nil), "etcdserverpb.Request")
@ -995,3 +1009,33 @@ var (
ErrInvalidLengthEtcdserver = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowEtcdserver = fmt.Errorf("proto: integer overflow")
)
var fileDescriptorEtcdserver = []byte{
// 404 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x5c, 0x92, 0x41, 0x6e, 0x13, 0x31,
0x14, 0x86, 0xe3, 0xc4, 0x99, 0x64, 0x4c, 0x81, 0x62, 0x45, 0xe8, 0xa9, 0x42, 0x43, 0x14, 0xb1,
0xc8, 0x0a, 0xee, 0x50, 0xd2, 0x45, 0x24, 0x8a, 0x4a, 0x8a, 0xca, 0xda, 0x64, 0x1e, 0x8d, 0xa5,
0xcc, 0x78, 0x6a, 0xbf, 0x19, 0x72, 0x03, 0xae, 0xc0, 0x91, 0xb2, 0xe4, 0x04, 0x08, 0xc2, 0x45,
0x90, 0x3d, 0x9d, 0x60, 0xba, 0xb3, 0xbe, 0xff, 0xf7, 0xef, 0xdf, 0xf6, 0x13, 0xa7, 0x48, 0xeb,
0xdc, 0xa1, 0x6d, 0xd0, 0xbe, 0xae, 0xac, 0x21, 0x23, 0x4f, 0xfe, 0x91, 0xea, 0xf3, 0xd9, 0xe4,
0xd6, 0xdc, 0x9a, 0x20, 0xbc, 0xf1, 0xab, 0xd6, 0x33, 0xfb, 0xc6, 0xc5, 0x68, 0x85, 0x77, 0x35,
0x3a, 0x92, 0x13, 0xd1, 0x5f, 0x2e, 0x80, 0x4d, 0xd9, 0x9c, 0x9f, 0xf3, 0xfd, 0xcf, 0x97, 0xbd,
0x55, 0x5f, 0x2f, 0xe4, 0x0b, 0x91, 0x5c, 0x22, 0x6d, 0x4c, 0x0e, 0xfd, 0x29, 0x9b, 0xa7, 0xf7,
0x4a, 0x52, 0x04, 0x26, 0x41, 0xf0, 0x2b, 0x45, 0x1b, 0x18, 0x44, 0x1a, 0xaf, 0x14, 0x6d, 0xe4,
0x73, 0x31, 0xb8, 0x51, 0x5b, 0xe0, 0x91, 0x30, 0x68, 0xd4, 0xd6, 0xf3, 0x85, 0xb6, 0x30, 0x9c,
0xb2, 0xf9, 0xb8, 0xe3, 0xb9, 0xb6, 0x72, 0x26, 0xd2, 0x2b, 0x8b, 0xcd, 0x8d, 0xda, 0xd6, 0x08,
0x49, 0xb4, 0x2b, 0xad, 0x3a, 0xdc, 0x79, 0x96, 0x65, 0x8e, 0x3b, 0x18, 0x45, 0x45, 0x83, 0x27,
0xe0, 0xce, 0x73, 0xb1, 0xd3, 0x8e, 0x60, 0x7c, 0x3c, 0x85, 0xb5, 0x9e, 0x80, 0xe5, 0x2b, 0x21,
0x2e, 0x76, 0x95, 0xb6, 0x8a, 0xb4, 0x29, 0x21, 0x9d, 0xb2, 0xf9, 0xe0, 0x3e, 0x48, 0xe0, 0x91,
0xfb, 0xbb, 0x7d, 0x52, 0x9a, 0x40, 0x44, 0x55, 0xf9, 0x57, 0xa5, 0x49, 0x9e, 0x89, 0xe1, 0xb5,
0x2e, 0xd7, 0x08, 0x8f, 0xa2, 0x0e, 0x43, 0xe7, 0x91, 0x3f, 0x7f, 0x85, 0xeb, 0xda, 0x3a, 0xdd,
0x20, 0x9c, 0x44, 0x5b, 0x53, 0xdb, 0x61, 0xff, 0xa6, 0xd7, 0xc6, 0x12, 0xe6, 0xf0, 0x38, 0x32,
0x24, 0x2e, 0x30, 0xaf, 0x7e, 0xa8, 0x8d, 0xad, 0x0b, 0x78, 0x12, 0xab, 0x77, 0x81, 0xf9, 0x56,
0x1f, 0x75, 0x81, 0xf0, 0x34, 0x6a, 0xcd, 0x49, 0x17, 0x6d, 0x2a, 0x59, 0x54, 0x05, 0x9c, 0xfe,
0x97, 0x1a, 0x98, 0xcc, 0xfc, 0x47, 0x7f, 0xb1, 0xe8, 0x36, 0xf0, 0x2c, 0x7a, 0x95, 0x91, 0x6d,
0xe1, 0xec, 0x9d, 0x18, 0x5f, 0x22, 0xa9, 0x5c, 0x91, 0xf2, 0x49, 0xef, 0x4d, 0x8e, 0x0f, 0xa6,
0x21, 0x29, 0x03, 0xf3, 0x37, 0x7c, 0xbb, 0xad, 0x1d, 0xa1, 0x5d, 0x2e, 0xc2, 0x50, 0x1c, 0x7f,
0x61, 0xdd, 0xe1, 0xf3, 0xc9, 0xfe, 0x77, 0xd6, 0xdb, 0x1f, 0x32, 0xf6, 0xe3, 0x90, 0xb1, 0x5f,
0x87, 0x8c, 0x7d, 0xff, 0x93, 0xf5, 0xfe, 0x06, 0x00, 0x00, 0xff, 0xff, 0x80, 0x62, 0xfc, 0x40,
0xa4, 0x02, 0x00, 0x00,
}

File diff suppressed because it is too large

View File

@ -10,10 +10,18 @@ option (gogoproto.sizer_all) = true;
option (gogoproto.unmarshaler_all) = true;
option (gogoproto.goproto_getters_all) = false;
message RequestHeader {
uint64 ID = 1;
// username is a username that is associated with an auth token of gRPC connection
string username = 2;
}
// An InternalRaftRequest is the union of all requests which can be
// sent via raft.
message InternalRaftRequest {
RequestHeader header = 100;
uint64 ID = 1;
Request v2 = 2;
RangeRequest range = 3;
@ -25,14 +33,40 @@ message InternalRaftRequest {
LeaseGrantRequest lease_grant = 8;
LeaseRevokeRequest lease_revoke = 9;
AuthEnableRequest auth_enable = 10;
AuthUserAddRequest auth_user_add = 11;
AuthUserDeleteRequest auth_user_delete = 12;
AuthUserChangePasswordRequest auth_user_change_password = 13;
AuthRoleAddRequest auth_role_add = 14;
AlarmRequest alarm = 10;
AlarmRequest alarm = 15;
AuthEnableRequest auth_enable = 1000;
AuthDisableRequest auth_disable = 1011;
InternalAuthenticateRequest authenticate = 1012;
AuthUserAddRequest auth_user_add = 1100;
AuthUserDeleteRequest auth_user_delete = 1101;
AuthUserGetRequest auth_user_get = 1102;
AuthUserChangePasswordRequest auth_user_change_password = 1103;
AuthUserGrantRoleRequest auth_user_grant_role = 1104;
AuthUserRevokeRoleRequest auth_user_revoke_role = 1105;
AuthUserListRequest auth_user_list = 1106;
AuthRoleListRequest auth_role_list = 1107;
AuthRoleAddRequest auth_role_add = 1200;
AuthRoleDeleteRequest auth_role_delete = 1201;
AuthRoleGetRequest auth_role_get = 1202;
AuthRoleGrantPermissionRequest auth_role_grant_permission = 1203;
AuthRoleRevokePermissionRequest auth_role_revoke_permission = 1204;
}
message EmptyResponse {
}
// What is the difference between AuthenticateRequest (defined in rpc.proto) and InternalAuthenticateRequest?
// InternalAuthenticateRequest has a member that is filled by etcdserver and shouldn't be user-facing.
// To avoid misuse of the field, we have an internal version of AuthenticateRequest.
message InternalAuthenticateRequest {
string name = 1;
string password = 2;
// simple_token is generated in API layer (etcdserver/v3_server.go)
string simple_token = 3;
}

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -2,56 +2,107 @@ syntax = "proto3";
package etcdserverpb;
import "gogoproto/gogo.proto";
import "etcd/storage/storagepb/kv.proto";
import "etcd/mvcc/mvccpb/kv.proto";
import "etcd/auth/authpb/auth.proto";
// for grpc-gateway
import "google/api/annotations.proto";
option (gogoproto.marshaler_all) = true;
option (gogoproto.unmarshaler_all) = true;
service KV {
// Range gets the keys in the range from the store.
rpc Range(RangeRequest) returns (RangeResponse) {}
// Range gets the keys in the range from the key-value store.
rpc Range(RangeRequest) returns (RangeResponse) {
option (google.api.http) = {
post: "/v3alpha/kv/range"
body: "*"
};
}
// Put puts the given key into the store.
// A put request increases the revision of the store,
// Put puts the given key into the key-value store.
// A put request increments the revision of the key-value store
// and generates one event in the event history.
rpc Put(PutRequest) returns (PutResponse) {}
rpc Put(PutRequest) returns (PutResponse) {
option (google.api.http) = {
post: "/v3alpha/kv/put"
body: "*"
};
}
// Delete deletes the given range from the store.
// A delete request increase the revision of the store,
// and generates one event in the event history.
rpc DeleteRange(DeleteRangeRequest) returns (DeleteRangeResponse) {}
// DeleteRange deletes the given range from the key-value store.
// A delete request increments the revision of the key-value store
// and generates a delete event in the event history for every deleted key.
rpc DeleteRange(DeleteRangeRequest) returns (DeleteRangeResponse) {
option (google.api.http) = {
post: "/v3alpha/kv/deleterange"
body: "*"
};
}
// Txn processes all the requests in one transaction.
// A txn request increases the revision of the store,
// and generates events with the same revision in the event history.
// Txn processes multiple requests in a single transaction.
// A txn request increments the revision of the key-value store
// and generates events with the same revision for every completed request.
// It is not allowed to modify the same key several times within one txn.
rpc Txn(TxnRequest) returns (TxnResponse) {}
rpc Txn(TxnRequest) returns (TxnResponse) {
option (google.api.http) = {
post: "/v3alpha/kv/txn"
body: "*"
};
}
// Compact compacts the event history in etcd. User should compact the
// event history periodically, or it will grow infinitely.
rpc Compact(CompactionRequest) returns (CompactionResponse) {}
// Compact compacts the event history in the etcd key-value store. The key-value
// store should be periodically compacted or the event history will continue to grow
// indefinitely.
rpc Compact(CompactionRequest) returns (CompactionResponse) {
option (google.api.http) = {
post: "/v3alpha/kv/compaction"
body: "*"
};
}
}
service Watch {
// Watch watches the events happening or happened. Both input and output
// are stream. One watch rpc can watch for multiple keys or prefixs and
// get a stream of events. The whole events history can be watched unless
// compacted.
rpc Watch(stream WatchRequest) returns (stream WatchResponse) {}
// Watch watches for events happening or that have happened. Both input and output
// are streams; the input stream is for creating and canceling watchers and the output
// stream sends events. One watch RPC can watch on multiple key ranges, streaming events
// for several watches at once. The entire event history can be watched starting from the
// last compaction revision.
rpc Watch(stream WatchRequest) returns (stream WatchResponse) {
option (google.api.http) = {
post: "/v3alpha/watch"
body: "*"
};
}
}
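
As a usage sketch of the Watch RPC described above, a clientv3 program (assuming a reachable member on localhost:2379; illustrative only) can stream events for a whole key prefix over a single stream:

package main

import (
	"fmt"
	"time"

	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// One Watch call streams events for every key under the "foo" prefix
	// until the context is canceled.
	for wresp := range cli.Watch(context.Background(), "foo", clientv3.WithPrefix()) {
		for _, ev := range wresp.Events {
			fmt.Printf("%s %q : %q\n", ev.Type, ev.Kv.Key, ev.Kv.Value)
		}
	}
}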
service Lease {
// LeaseGrant creates a lease. A lease has a TTL. The lease will expire if the
// server does not receive a keepAlive within TTL from the lease holder.
// All keys attached to the lease will be expired and deleted if the lease expires.
// The key expiration generates an event in event history.
rpc LeaseGrant(LeaseGrantRequest) returns (LeaseGrantResponse) {}
// LeaseGrant creates a lease which expires if the server does not receive a keepAlive
// within a given time to live period. All keys attached to the lease will be expired and
// deleted if the lease expires. Each expired key generates a delete event in the event history.
rpc LeaseGrant(LeaseGrantRequest) returns (LeaseGrantResponse) {
option (google.api.http) = {
post: "/v3alpha/lease/grant"
body: "*"
};
}
// LeaseRevoke revokes a lease. All the key attached to the lease will be expired and deleted.
rpc LeaseRevoke(LeaseRevokeRequest) returns (LeaseRevokeResponse) {}
// LeaseRevoke revokes a lease. All keys attached to the lease will expire and be deleted.
rpc LeaseRevoke(LeaseRevokeRequest) returns (LeaseRevokeResponse) {
option (google.api.http) = {
post: "/v3alpha/kv/lease/revoke"
body: "*"
};
}
// KeepAlive keeps the lease alive.
rpc LeaseKeepAlive(stream LeaseKeepAliveRequest) returns (stream LeaseKeepAliveResponse) {}
// LeaseKeepAlive keeps the lease alive by streaming keep alive requests from the client
// to the server and streaming keep alive responses from the server to the client.
rpc LeaseKeepAlive(stream LeaseKeepAliveRequest) returns (stream LeaseKeepAliveResponse) {
option (google.api.http) = {
post: "/v3alpha/lease/keepalive"
body: "*"
};
}
// TODO(xiangli) List all existing Leases?
// TODO(xiangli) Get details information (expirations, leased keys, etc.) of a lease?
@ -59,83 +110,220 @@ service Lease {
service Cluster {
// MemberAdd adds a member into the cluster.
rpc MemberAdd(MemberAddRequest) returns (MemberAddResponse) {}
rpc MemberAdd(MemberAddRequest) returns (MemberAddResponse) {
option (google.api.http) = {
post: "/v3alpha/cluster/member/add"
body: "*"
};
}
// MemberRemove removes an existing member from the cluster.
rpc MemberRemove(MemberRemoveRequest) returns (MemberRemoveResponse) {}
rpc MemberRemove(MemberRemoveRequest) returns (MemberRemoveResponse) {
option (google.api.http) = {
post: "/v3alpha/cluster/member/remove"
body: "*"
};
}
// MemberUpdate updates the member configuration.
rpc MemberUpdate(MemberUpdateRequest) returns (MemberUpdateResponse) {}
rpc MemberUpdate(MemberUpdateRequest) returns (MemberUpdateResponse) {
option (google.api.http) = {
post: "/v3alpha/cluster/member/update"
body: "*"
};
}
// MemberList lists all the members in the cluster.
rpc MemberList(MemberListRequest) returns (MemberListResponse) {}
rpc MemberList(MemberListRequest) returns (MemberListResponse) {
option (google.api.http) = {
post: "/v3alpha/cluster/member/list"
body: "*"
};
}
}
service Maintenance {
// Alarm activates, deactivates, and queries alarms regarding cluster health.
rpc Alarm(AlarmRequest) returns (AlarmResponse) {}
rpc Alarm(AlarmRequest) returns (AlarmResponse) {
option (google.api.http) = {
post: "/v3alpha/maintenance/alarm"
body: "*"
};
}
// Status gets the status of the member.
rpc Status(StatusRequest) returns (StatusResponse) {}
rpc Status(StatusRequest) returns (StatusResponse) {
option (google.api.http) = {
post: "/v3alpha/maintenance/status"
body: "*"
};
}
rpc Defragment(DefragmentRequest) returns (DefragmentResponse) {}
// Defragment defragments a member's backend database to recover storage space.
rpc Defragment(DefragmentRequest) returns (DefragmentResponse) {
option (google.api.http) = {
post: "/v3alpha/maintenance/defragment"
body: "*"
};
}
// Hash returns the hash of the local KV state for consistency checking purposes.
// This is designed for testing; do not use this in production when there
// are ongoing transactions.
rpc Hash(HashRequest) returns (HashResponse) {}
rpc Hash(HashRequest) returns (HashResponse) {
option (google.api.http) = {
post: "/v3alpha/maintenance/hash"
body: "*"
};
}
// Snapshot sends a snapshot of the entire backend from a member over a stream to a client.
rpc Snapshot(SnapshotRequest) returns (stream SnapshotResponse) {
option (google.api.http) = {
post: "/v3alpha/maintenance/snapshot"
body: "*"
};
}
}
service Auth {
// AuthEnable enables authentication.
rpc AuthEnable(AuthEnableRequest) returns (AuthEnableResponse) {}
rpc AuthEnable(AuthEnableRequest) returns (AuthEnableResponse) {
option (google.api.http) = {
post: "/v3alpha/auth/enable"
body: "*"
};
}
// AuthDisable disables authentication.
rpc AuthDisable(AuthDisableRequest) returns (AuthDisableResponse) {}
rpc AuthDisable(AuthDisableRequest) returns (AuthDisableResponse) {
option (google.api.http) = {
post: "/v3alpha/auth/disable"
body: "*"
};
}
// Authenticate processes authenticate request.
rpc Authenticate(AuthenticateRequest) returns (AuthenticateResponse) {}
// Authenticate processes an authenticate request.
rpc Authenticate(AuthenticateRequest) returns (AuthenticateResponse) {
option (google.api.http) = {
post: "/v3alpha/auth/authenticate"
body: "*"
};
}
// UserAdd adds a new user.
rpc UserAdd(AuthUserAddRequest) returns (AuthUserAddResponse) {}
rpc UserAdd(AuthUserAddRequest) returns (AuthUserAddResponse) {
option (google.api.http) = {
post: "/v3alpha/auth/user/add"
body: "*"
};
}
// UserGet gets a detailed information of a user or lists entire users.
rpc UserGet(AuthUserGetRequest) returns (AuthUserGetResponse) {}
// UserGet gets detailed user information.
rpc UserGet(AuthUserGetRequest) returns (AuthUserGetResponse) {
option (google.api.http) = {
post: "/v3alpha/auth/user/get"
body: "*"
};
}
// UserList gets a list of all users.
rpc UserList(AuthUserListRequest) returns (AuthUserListResponse) {
option (google.api.http) = {
post: "/v3alpha/auth/user/list"
body: "*"
};
}
// UserDelete deletes a specified user.
rpc UserDelete(AuthUserDeleteRequest) returns (AuthUserDeleteResponse) {}
rpc UserDelete(AuthUserDeleteRequest) returns (AuthUserDeleteResponse) {
option (google.api.http) = {
post: "/v3alpha/auth/user/delete"
body: "*"
};
}
// UserChangePassword changes password of a specified user.
rpc UserChangePassword(AuthUserChangePasswordRequest) returns (AuthUserChangePasswordResponse) {}
// UserChangePassword changes the password of a specified user.
rpc UserChangePassword(AuthUserChangePasswordRequest) returns (AuthUserChangePasswordResponse) {
option (google.api.http) = {
post: "/v3alpha/auth/user/changepw"
body: "*"
};
}
// UserGrant grants a role to a specified user.
rpc UserGrant(AuthUserGrantRequest) returns (AuthUserGrantResponse) {}
rpc UserGrantRole(AuthUserGrantRoleRequest) returns (AuthUserGrantRoleResponse) {
option (google.api.http) = {
post: "/v3alpha/auth/user/grant"
body: "*"
};
}
// UserRevoke revokes a role of specified user.
rpc UserRevoke(AuthUserRevokeRequest) returns (AuthUserRevokeResponse) {}
// UserRevokeRole revokes a role of a specified user.
rpc UserRevokeRole(AuthUserRevokeRoleRequest) returns (AuthUserRevokeRoleResponse) {
option (google.api.http) = {
post: "/v3alpha/auth/user/revoke"
body: "*"
};
}
// RoleAdd adds a new role.
rpc RoleAdd(AuthRoleAddRequest) returns (AuthRoleAddResponse) {}
rpc RoleAdd(AuthRoleAddRequest) returns (AuthRoleAddResponse) {
option (google.api.http) = {
post: "/v3alpha/auth/role/add"
body: "*"
};
}
// RoleGet gets a detailed information of a role or lists entire roles.
rpc RoleGet(AuthRoleGetRequest) returns (AuthRoleGetResponse) {}
// RoleGet gets detailed role information.
rpc RoleGet(AuthRoleGetRequest) returns (AuthRoleGetResponse) {
option (google.api.http) = {
post: "/v3alpha/auth/role/get"
body: "*"
};
}
// RoleList gets a list of all roles.
rpc RoleList(AuthRoleListRequest) returns (AuthRoleListResponse) {
option (google.api.http) = {
post: "/v3alpha/auth/role/list"
body: "*"
};
}
// RoleDelete deletes a specified role.
rpc RoleDelete(AuthRoleDeleteRequest) returns (AuthRoleDeleteResponse) {}
rpc RoleDelete(AuthRoleDeleteRequest) returns (AuthRoleDeleteResponse) {
option (google.api.http) = {
post: "/v3alpha/auth/role/delete"
body: "*"
};
}
// RoleGrant grants a permission of a specified key or range to a specified role.
rpc RoleGrant(AuthRoleGrantRequest) returns (AuthRoleGrantResponse) {}
// RoleGrantPermission grants a permission of a specified key or range to a specified role.
rpc RoleGrantPermission(AuthRoleGrantPermissionRequest) returns (AuthRoleGrantPermissionResponse) {
option (google.api.http) = {
post: "/v3alpha/auth/role/grant"
body: "*"
};
}
// RoleRevoke revokes a key or range permission of a specified role.
rpc RoleRevoke(AuthRoleRevokeRequest) returns (AuthRoleRevokeResponse) {}
// RoleRevokePermission revokes a key or range permission of a specified role.
rpc RoleRevokePermission(AuthRoleRevokePermissionRequest) returns (AuthRoleRevokePermissionResponse) {
option (google.api.http) = {
post: "/v3alpha/auth/role/revoke"
body: "*"
};
}
}
message ResponseHeader {
// cluster_id is the ID of the cluster which sent the response.
uint64 cluster_id = 1;
// member_id is the ID of the member which sent the response.
uint64 member_id = 2;
// revision of the store when the request was applied.
// revision is the key-value store revision when the request was applied.
int64 revision = 3;
// term of raft when the request was applied.
// raft_term is the raft term when the request was applied.
uint64 raft_term = 4;
}
@ -153,43 +341,60 @@ message RangeRequest {
VALUE = 4;
}
// if the range_end is not given, the request returns the key.
// key is the first key for the range. If range_end is not given, the request only looks up key.
bytes key = 1;
// if the range_end is given, it gets the keys in range [key, range_end)
// if range_end is nonempty, otherwise it returns all keys >= key.
// range_end is the upper bound on the requested range [key, range_end).
// If range_end is '\0', the range is all keys >= key.
// If the range_end is one bit larger than the given key,
// then the range request gets all keys with the prefix (the given key).
// If both key and range_end are '\0', then range requests returns all keys.
bytes range_end = 2;
// limit the number of keys returned.
// limit is a limit on the number of keys returned for the request.
int64 limit = 3;
// range over the store at the given revision.
// if revision is less or equal to zero, range over the newest store.
// if the revision has been compacted, ErrCompaction will be returned in
// response.
// revision is the point-in-time of the key-value store to use for the range.
// If revision is less than or equal to zero, the range is over the newest key-value store.
// If the revision has been compacted, ErrCompacted is returned as a response.
int64 revision = 4;
// sort_order is the requested order for returned the results
// sort_order is the order for returned sorted results.
SortOrder sort_order = 5;
// sort_target is the kv field to use for sorting
// sort_target is the key-value field to use for sorting.
SortTarget sort_target = 6;
// range request is linearizable by default. Linearizable requests has a higher
// latency and lower throughput than serializable request.
// To reduce latency, serializable can be set. If serializable is set, range request
// will be serializable, but not linearizable with other requests.
// Serializable range can be served locally without waiting for other nodes in the cluster.
// serializable sets the range request to use serializable member-local reads.
// Range requests are linearizable by default; linearizable requests have higher
// latency and lower throughput than serializable requests but reflect the current
// consensus of the cluster. For better performance, in exchange for possible stale reads,
// a serializable range request is served locally without needing to reach consensus
// with other nodes in the cluster.
bool serializable = 7;
// keys_only when set returns only the keys and not the values.
bool keys_only = 8;
// count_only when set returns only the count of the keys in the range.
bool count_only = 9;
}
message RangeResponse {
ResponseHeader header = 1;
repeated storagepb.KeyValue kvs = 2;
// kvs is the list of key-value pairs matched by the range request.
// kvs is empty when count is requested.
repeated mvccpb.KeyValue kvs = 2;
// more indicates if there are more keys to return in the requested range.
bool more = 3;
// count is set to the number of keys within the range when requested.
int64 count = 4;
}
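For orientation, the range options above surface through the vendored clientv3 Go package. Below is a minimal sketch, assuming a single-member cluster on a local endpoint; the endpoint, prefix, limit, and timeout are illustrative placeholders, not part of this diff.

package main

import (
    "fmt"
    "time"

    "github.com/coreos/etcd/clientv3"
    "golang.org/x/net/context"
)

func main() {
    // Placeholder endpoint; adjust for a real cluster.
    cli, err := clientv3.New(clientv3.Config{
        Endpoints:   []string{"http://127.0.0.1:2379"},
        DialTimeout: 5 * time.Second,
    })
    if err != nil {
        panic(err)
    }
    defer cli.Close()

    // A prefixed, limited, serializable read: this issues a RangeRequest with
    // range_end set to the prefix end of "foo", limit=10, and serializable=true.
    resp, err := cli.Get(context.Background(), "foo",
        clientv3.WithPrefix(),
        clientv3.WithLimit(10),
        clientv3.WithSerializable(),
    )
    if err != nil {
        panic(err)
    }
    for _, kv := range resp.Kvs {
        fmt.Printf("%s -> %s (mod_revision=%d)\n", kv.Key, kv.Value, kv.ModRevision)
    }
}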
message PutRequest {
// key is the key, in bytes, to put into the key-value store.
bytes key = 1;
// value is the value, in bytes, to associate with the key in the key-value store.
bytes value = 2;
// lease is the lease ID to associate with the key in the key-value store. A lease
// value of 0 indicates no lease.
int64 lease = 3;
}
@@ -198,19 +403,22 @@ message PutResponse {
}
message DeleteRangeRequest {
// if the range_end is not given, the request deletes the key.
// key is the first key to delete in the range.
bytes key = 1;
// if the range_end is given, it deletes the keys in range [key, range_end).
// range_end is the key following the last key to delete for the range [key, range_end).
// If range_end is not given, the range is defined to contain only the key argument.
// If range_end is '\0', the range is all keys greater than or equal to the key argument.
bytes range_end = 2;
}
message DeleteRangeResponse {
ResponseHeader header = 1;
// Deleted is the number of keys that got deleted.
// deleted is the number of keys deleted by the delete range request.
int64 deleted = 2;
}
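Continuing the illustration with the same assumed client, a prefix delete exercises key, range_end, and the deleted count returned above.

package example

import (
    "fmt"

    "github.com/coreos/etcd/clientv3"
    "golang.org/x/net/context"
)

// deletePrefix removes every key under prefix; clientv3.WithPrefix() derives
// range_end from the prefix. cli is assumed to be built as in the earlier sketch.
func deletePrefix(cli *clientv3.Client, prefix string) error {
    resp, err := cli.Delete(context.Background(), prefix, clientv3.WithPrefix())
    if err != nil {
        return err
    }
    fmt.Printf("deleted %d keys\n", resp.Deleted)
    return nil
}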
message RequestUnion {
message RequestOp {
// request is a union of request types accepted by a transaction.
oneof request {
RangeRequest request_range = 1;
PutRequest request_put = 2;
@@ -218,7 +426,8 @@ message RequestUnion {
}
}
message ResponseUnion {
message ResponseOp {
// response is a union of response types returned by a transaction.
oneof response {
RangeResponse response_range = 1;
PutResponse response_put = 2;
@@ -238,27 +447,24 @@ message Compare {
MOD = 2;
VALUE= 3;
}
// result is logical comparison operation for this comparison.
CompareResult result = 1;
// target is the key-value field to inspect for the comparison.
CompareTarget target = 2;
// key path
// key is the subject key for the comparison operation.
bytes key = 3;
oneof target_union {
// version of the given key
// version is the version of the given key
int64 version = 4;
// create revision of the given key
// create_revision is the creation revision of the given key
int64 create_revision = 5;
// last modified revision of the given key
// mod_revision is the last modified revision of the given key.
int64 mod_revision = 6;
// value of the given key
// value is the value of the given key, in bytes.
bytes value = 7;
}
}
// If the comparisons succeed, then the success requests will be processed in order,
// and the response will contain their respective responses in order.
// If the comparisons fail, then the failure requests will be processed in order,
// and the response will contain their respective responses in order.
// From google paxosdb paper:
// Our implementation hinges around a powerful primitive which we call MultiOp. All other database
// operations except for iteration are implemented as a single call to MultiOp. A MultiOp is applied atomically
@@ -275,26 +481,35 @@ message Compare {
// true.
// 3. A list of database operations called f op. Like t op, but executed if guard evaluates to false.
message TxnRequest {
// compare is a list of predicates representing a conjunction of terms.
// If the comparisons succeed, then the success requests will be processed in order,
// and the response will contain their respective responses in order.
// If the comparisons fail, then the failure requests will be processed in order,
// and the response will contain their respective responses in order.
repeated Compare compare = 1;
repeated RequestUnion success = 2;
repeated RequestUnion failure = 3;
// success is a list of requests which will be applied when compare evaluates to true.
repeated RequestOp success = 2;
// failure is a list of requests which will be applied when compare evaluates to false.
repeated RequestOp failure = 3;
}
message TxnResponse {
ResponseHeader header = 1;
// succeeded is set to true if the compare evaluated to true, or false otherwise.
bool succeeded = 2;
repeated ResponseUnion responses = 3;
// responses is a list of responses corresponding to the results from applying
// success if succeeded is true or failure if succeeded is false.
repeated ResponseOp responses = 3;
}
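As a sketch of how a client drives the compare/success/failure shape through the vendored clientv3 package (the key and values are illustrative assumptions):

package example

import (
    "github.com/coreos/etcd/clientv3"
    "golang.org/x/net/context"
)

// casUpdate is a compare-and-swap style transaction: if the current value of
// key equals oldVal (the compare list), the Put in the success list is applied;
// otherwise the Get in the failure list runs. Succeeded reports which branch ran.
func casUpdate(cli *clientv3.Client, key, oldVal, newVal string) (bool, error) {
    resp, err := cli.Txn(context.Background()).
        If(clientv3.Compare(clientv3.Value(key), "=", oldVal)).
        Then(clientv3.OpPut(key, newVal)).
        Else(clientv3.OpGet(key)).
        Commit()
    if err != nil {
        return false, err
    }
    return resp.Succeeded, nil
}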
// Compaction compacts the kv store upto the given revision (including).
// It removes the old versions of a key. It keeps the newest version of
// the key even if its latest modification revision is smaller than the given
// revision.
// CompactionRequest compacts the key-value store up to a given revision. All superseded keys
// with a revision less than the compaction revision will be removed.
message CompactionRequest {
// revision is the key-value store revision for the compaction operation.
int64 revision = 1;
// physical is set so the RPC will wait until the compaction is physically
// applied to the local database such that compacted entries are totally
// removed from the backing store.
// removed from the backend database.
bool physical = 2;
}
@@ -307,10 +522,27 @@ message HashRequest {
message HashResponse {
ResponseHeader header = 1;
// hash is the hash value computed from the responding member's key-value store.
uint32 hash = 2;
}
message SnapshotRequest {
}
message SnapshotResponse {
// header has the current key-value store information. The first header in the snapshot
// stream indicates the point in time of the snapshot.
ResponseHeader header = 1;
// remaining_bytes is the number of blob bytes to be sent after this message
uint64 remaining_bytes = 2;
// blob contains the next chunk of the snapshot in the snapshot stream.
bytes blob = 3;
}
message WatchRequest {
// request_union is a request to either create a new watcher or cancel an existing watcher.
oneof request_union {
WatchCreateRequest create_request = 1;
WatchCancelRequest cancel_request = 2;
@@ -318,65 +550,69 @@ message WatchRequest {
}
message WatchCreateRequest {
// the key to be watched
// key is the key to register for watching.
bytes key = 1;
// if the range_end is given, keys in [key, range_end) are watched
// NOTE: only range_end == prefixEnd(key) is accepted now
// range_end is the end of the range [key, range_end) to watch. If range_end is not given,
// only the key argument is watched. If range_end is equal to '\0', all keys greater than
// or equal to the key argument are watched.
bytes range_end = 2;
// start_revision is an optional revision (including) to watch from. No start_revision is "now".
// start_revision is an optional revision to watch from (inclusive). No start_revision is "now".
int64 start_revision = 3;
// if progress_notify is set, etcd server sends WatchResponse with empty events to the
// created watcher when there are no recent events. It is useful when clients want always to be
// able to recover a disconnected watcher from a recent known revision.
// etcdsever can decide how long it should send a notification based on current load.
// progress_notify is set so that the etcd server will periodically send a WatchResponse with
// no events to the new watcher if there are no recent events. It is useful when clients
// wish to recover a disconnected watcher starting from a recent known revision.
// The etcd server may decide how often it will send notifications based on current load.
bool progress_notify = 4;
}
message WatchCancelRequest {
// watch_id is the watcher id to cancel so that no more events are transmitted.
int64 watch_id = 1;
}
message WatchResponse {
ResponseHeader header = 1;
// watch_id is the ID of the watching the response sent to.
// watch_id is the ID of the watcher that corresponds to the response.
int64 watch_id = 2;
// If the response is for a create watch request, created is set to true.
// Client should record the watch_id and prepare for receiving events for
// that watching from the same stream.
// All events sent to the created watching will attach with the same watch_id.
// created is set to true if the response is for a create watch request.
// The client should record the watch_id and expect to receive events for
// the created watcher from the same stream.
// All events sent to the created watcher will be attached with the same watch_id.
bool created = 3;
// If the response is for a cancel watch request, cancel is set to true.
// No further events will be sent to the canceled watching.
// canceled is set to true if the response is for a cancel watch request.
// No further events will be sent to the canceled watcher.
bool canceled = 4;
// CompactRevision is set to the minimum index if a watching tries to watch
// compact_revision is set to the minimum index if a watcher tries to watch
// at a compacted index.
//
// This happens when creating a watching at a compacted revision or the watching cannot
// catch up with the progress of the KV.
// This happens when creating a watcher at a compacted revision or the watcher cannot
// catch up with the progress of the key-value store.
//
// Client should treat the watching as canceled and should not try to create any
// watching with same start_revision again.
// The client should treat the watcher as canceled and should not try to create any
// watcher with the same start_revision again.
int64 compact_revision = 5;
repeated storagepb.Event events = 11;
repeated mvccpb.Event events = 11;
}
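A rough sketch of the corresponding client-side watch loop using the vendored clientv3 package; the prefix and starting revision are assumed for illustration.

package example

import (
    "fmt"

    "github.com/coreos/etcd/clientv3"
    "golang.org/x/net/context"
)

// watchPrefix registers a watcher on every key under prefix starting at
// startRev (WatchCreateRequest.start_revision) and prints events until ctx is
// canceled or the requested revision has been compacted.
func watchPrefix(ctx context.Context, cli *clientv3.Client, prefix string, startRev int64) {
    rch := cli.Watch(ctx, prefix, clientv3.WithPrefix(), clientv3.WithRev(startRev))
    for wresp := range rch {
        if wresp.CompactRevision != 0 {
            // Treat the watcher as canceled; do not retry with the same start_revision.
            fmt.Printf("watch compacted at revision %d\n", wresp.CompactRevision)
            return
        }
        for _, ev := range wresp.Events {
            fmt.Printf("%s %q -> %q\n", ev.Type, ev.Kv.Key, ev.Kv.Value)
        }
    }
}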
message LeaseGrantRequest {
// advisory ttl in seconds
// TTL is the advisory time-to-live in seconds.
int64 TTL = 1;
// requested ID to create; 0 lets lessor choose
// ID is the requested ID for the lease. If ID is set to 0, the lessor chooses an ID.
int64 ID = 2;
}
message LeaseGrantResponse {
ResponseHeader header = 1;
// ID is the lease ID for the granted lease.
int64 ID = 2;
// server decided ttl in second
// TTL is the server chosen lease time-to-live in seconds.
int64 TTL = 3;
string error = 4;
}
message LeaseRevokeRequest {
// ID is the lease ID to revoke. When the ID is revoked, all associated keys will be deleted.
int64 ID = 1;
}
@@ -385,36 +621,42 @@ message LeaseRevokeResponse {
}
message LeaseKeepAliveRequest {
// ID is the lease ID for the lease to keep alive.
int64 ID = 1;
}
message LeaseKeepAliveResponse {
ResponseHeader header = 1;
// ID is the lease ID from the keep alive request.
int64 ID = 2;
// TTL is the new time-to-live for the lease.
int64 TTL = 3;
}
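The lease flow above, sketched with the vendored clientv3 package; the TTL, key, and value are illustrative assumptions.

package example

import (
    "github.com/coreos/etcd/clientv3"
    "golang.org/x/net/context"
)

// putWithLease grants a lease with the given TTL in seconds, attaches a key to
// it, and keeps the lease alive in the background; if the keep-alives stop,
// the key expires together with the lease.
func putWithLease(cli *clientv3.Client, key, val string, ttl int64) error {
    ctx := context.Background()

    grant, err := cli.Grant(ctx, ttl) // LeaseGrantRequest: TTL set, ID chosen by the server
    if err != nil {
        return err
    }
    if _, err := cli.Put(ctx, key, val, clientv3.WithLease(clientv3.LeaseID(grant.ID))); err != nil {
        return err
    }

    // KeepAlive streams keep-alive requests for this lease ID.
    kaCh, err := cli.KeepAlive(ctx, clientv3.LeaseID(grant.ID))
    if err != nil {
        return err
    }
    go func() {
        for range kaCh { // drain keep-alive responses
        }
    }()
    return nil
}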
message Member {
// ID is the member ID for this member.
uint64 ID = 1;
// If the member is not started, name will be an empty string.
// name is the human-readable name of the member. If the member is not started, the name will be an empty string.
string name = 2;
bool IsLeader = 3;
repeated string peerURLs = 4;
// If the member is not started, client_URLs will be an zero length
// string array.
repeated string clientURLs = 5;
// peerURLs is the list of URLs the member exposes to the cluster for communication.
repeated string peerURLs = 3;
// clientURLs is the list of URLs the member exposes to clients for communication. If the member is not started, clientURLs will be empty.
repeated string clientURLs = 4;
}
message MemberAddRequest {
// peerURLs is the list of URLs the added member will use to communicate with the cluster.
repeated string peerURLs = 1;
}
message MemberAddResponse {
ResponseHeader header = 1;
// member is the member information for the added member.
Member member = 2;
}
message MemberRemoveRequest {
// ID is the member ID of the member to remove.
uint64 ID = 1;
}
@@ -423,7 +665,9 @@ message MemberRemoveResponse {
}
message MemberUpdateRequest {
// ID is the member ID of the member to update.
uint64 ID = 1;
// peerURLs is the new list of URLs the member will use to communicate with the cluster.
repeated string peerURLs = 2;
}
@@ -436,11 +680,11 @@ message MemberListRequest {
message MemberListResponse {
ResponseHeader header = 1;
// members is a list of all members associated with the cluster.
repeated Member members = 2;
}
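For the membership messages, a small sketch using the clientv3 Cluster API; note the Member fields after the removal of IsLeader.

package example

import (
    "fmt"

    "github.com/coreos/etcd/clientv3"
    "golang.org/x/net/context"
)

// printMembers lists all cluster members with their name, peer URLs, and client URLs.
func printMembers(cli *clientv3.Client) error {
    resp, err := cli.MemberList(context.Background())
    if err != nil {
        return err
    }
    for _, m := range resp.Members {
        fmt.Printf("%x %s peers=%v clients=%v\n", m.ID, m.Name, m.PeerURLs, m.ClientURLs)
    }
    return nil
}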
message DefragmentRequest {
}
message DefragmentResponse {
@@ -449,7 +693,7 @@ message DefragmentResponse {
enum AlarmType {
NONE = 0; // default, used to query if any alarm is active
NOSPACE = 1;
NOSPACE = 1; // space quota is exhausted
}
message AlarmRequest {
@@ -458,19 +702,27 @@ message AlarmRequest {
ACTIVATE = 1;
DEACTIVATE = 2;
}
// action is the kind of alarm request to issue. The action
// may GET alarm statuses, ACTIVATE an alarm, or DEACTIVATE a
// raised alarm.
AlarmAction action = 1;
// MemberID is the member raising the alarm request
// memberID is the ID of the member associated with the alarm. If memberID is 0, the
// alarm request covers all members.
uint64 memberID = 2;
// alarm is the type of alarm to consider for this request.
AlarmType alarm = 3;
}
message AlarmMember {
uint64 memberID = 1;
AlarmType alarm = 2;
// memberID is the ID of the member associated with the raised alarm.
uint64 memberID = 1;
// alarm is the type of alarm which has been raised.
AlarmType alarm = 2;
}
message AlarmResponse {
ResponseHeader header = 1;
// alarms is a list of alarms associated with the alarm request.
repeated AlarmMember alarms = 2;
}
@@ -479,7 +731,16 @@ message StatusRequest {
message StatusResponse {
ResponseHeader header = 1;
// version is the cluster protocol version used by the responding member.
string version = 2;
// dbSize is the size of the backend database, in bytes, of the responding member.
int64 dbSize = 3;
// leader is the member ID which the responding member believes is the current leader.
uint64 leader = 4;
// raftIndex is the current raft index of the responding member.
uint64 raftIndex = 5;
// raftTerm is the current raft term of the responding member.
uint64 raftTerm = 6;
}
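And a sketch of querying a single member's status through the clientv3 Maintenance API; the endpoint argument is a placeholder, and the leader is reported as a member ID as described above.

package example

import (
    "fmt"

    "github.com/coreos/etcd/clientv3"
    "golang.org/x/net/context"
)

// printStatus asks one member, addressed by its client URL, for its status:
// version, backend database size, believed leader, and raft index/term.
func printStatus(cli *clientv3.Client, endpoint string) error {
    st, err := cli.Status(context.Background(), endpoint)
    if err != nil {
        return err
    }
    fmt.Printf("version=%s dbSize=%d leader=%x raftIndex=%d raftTerm=%d\n",
        st.Version, st.DbSize, st.Leader, st.RaftIndex, st.RaftTerm)
    return nil
}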
message AuthEnableRequest {
@@ -489,6 +750,8 @@ message AuthDisableRequest {
}
message AuthenticateRequest {
string name = 1;
string password = 2;
}
message AuthUserAddRequest {
@@ -497,37 +760,63 @@ message AuthUserAddRequest {
}
message AuthUserGetRequest {
string name = 1;
}
message AuthUserDeleteRequest {
// name is the name of the user to delete.
string name = 1;
}
message AuthUserChangePasswordRequest {
// name is the name of the user whose password is being changed.
string name = 1;
// password is the new password for the user.
string password = 2;
}
message AuthUserGrantRequest {
message AuthUserGrantRoleRequest {
// user is the name of the user which should be granted a given role.
string user = 1;
// role is the name of the role to grant to the user.
string role = 2;
}
message AuthUserRevokeRequest {
message AuthUserRevokeRoleRequest {
string name = 1;
string role = 2;
}
message AuthRoleAddRequest {
// name is the name of the role to add to the authentication system.
string name = 1;
}
message AuthRoleGetRequest {
string role = 1;
}
message AuthUserListRequest {
}
message AuthRoleListRequest {
}
message AuthRoleDeleteRequest {
string role = 1;
}
message AuthRoleGrantRequest {
message AuthRoleGrantPermissionRequest {
// name is the name of the role which will be granted the permission.
string name = 1;
// perm is the permission to grant to the role.
authpb.Permission perm = 2;
}
message AuthRoleRevokeRequest {
message AuthRoleRevokePermissionRequest {
string role = 1;
string key = 2;
string range_end = 3;
}
message AuthEnableResponse {
@@ -540,6 +829,8 @@ message AuthDisableResponse {
message AuthenticateResponse {
ResponseHeader header = 1;
// token is an authorized token that can be used in succeeding RPCs
string token = 2;
}
message AuthUserAddResponse {
@@ -548,6 +839,8 @@ message AuthUserAddResponse {
message AuthUserGetResponse {
ResponseHeader header = 1;
repeated string roles = 2;
}
message AuthUserDeleteResponse {
@@ -558,11 +851,11 @@ message AuthUserChangePasswordResponse {
ResponseHeader header = 1;
}
message AuthUserGrantResponse {
message AuthUserGrantRoleResponse {
ResponseHeader header = 1;
}
message AuthUserRevokeResponse {
message AuthUserRevokeRoleResponse {
ResponseHeader header = 1;
}
@@ -572,16 +865,30 @@ message AuthRoleAddResponse {
message AuthRoleGetResponse {
ResponseHeader header = 1;
repeated authpb.Permission perm = 2;
}
message AuthRoleListResponse {
ResponseHeader header = 1;
repeated string roles = 2;
}
message AuthUserListResponse {
ResponseHeader header = 1;
repeated string users = 2;
}
message AuthRoleDeleteResponse {
ResponseHeader header = 1;
}
message AuthRoleGrantResponse {
message AuthRoleGrantPermissionResponse {
ResponseHeader header = 1;
}
message AuthRoleRevokeResponse {
message AuthRoleRevokePermissionResponse {
ResponseHeader header = 1;
}


@@ -1,4 +1,4 @@
// Copyright 2015 CoreOS, Inc.
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -25,11 +25,11 @@ import (
"strings"
"sync"
"github.com/coreos/etcd/mvcc/backend"
"github.com/coreos/etcd/pkg/netutil"
"github.com/coreos/etcd/pkg/types"
"github.com/coreos/etcd/raft"
"github.com/coreos/etcd/raft/raftpb"
"github.com/coreos/etcd/storage/backend"
"github.com/coreos/etcd/store"
"github.com/coreos/etcd/version"
"github.com/coreos/go-semver/semver"
@@ -144,9 +144,7 @@ func (c *RaftCluster) PeerURLs() []string {
defer c.Unlock()
urls := make([]string, 0)
for _, p := range c.members {
for _, addr := range p.PeerURLs {
urls = append(urls, addr)
}
urls = append(urls, p.PeerURLs...)
}
sort.Strings(urls)
return urls
@@ -159,9 +157,7 @@ func (c *RaftCluster) ClientURLs() []string {
defer c.Unlock()
urls := make([]string, 0)
for _, p := range c.members {
for _, url := range p.ClientURLs {
urls = append(urls, url)
}
urls = append(urls, p.ClientURLs...)
}
sort.Strings(urls)
return urls
@@ -199,13 +195,19 @@ func (c *RaftCluster) SetID(id types.ID) { c.id = id }
func (c *RaftCluster) SetStore(st store.Store) { c.store = st }
func (c *RaftCluster) Recover() {
func (c *RaftCluster) SetBackend(be backend.Backend) {
c.be = be
mustCreateBackendBuckets(c.be)
}
func (c *RaftCluster) Recover(onSet func(*semver.Version)) {
c.Lock()
defer c.Unlock()
c.members, c.removed = membersFromStore(c.store)
c.version = clusterVersionFromStore(c.store)
mustDetectDowngrade(c.version)
onSet(c.version)
for _, m := range c.members {
plog.Infof("added member %s %v to cluster %s from store", m.ID, m.PeerURLs, c.id)
@@ -289,6 +291,8 @@ func (c *RaftCluster) AddMember(m *Member) {
}
c.members[m.ID] = m
plog.Infof("added member %s %v to cluster %s", m.ID, m.PeerURLs, c.id)
}
// RemoveMember removes a member from the store.
@@ -305,23 +309,28 @@ func (c *RaftCluster) RemoveMember(id types.ID) {
delete(c.members, id)
c.removed[id] = true
plog.Infof("removed member %s from cluster %s", id, c.id)
}
func (c *RaftCluster) UpdateAttributes(id types.ID, attr Attributes) bool {
func (c *RaftCluster) UpdateAttributes(id types.ID, attr Attributes) {
c.Lock()
defer c.Unlock()
if m, ok := c.members[id]; ok {
m.Attributes = attr
return true
if c.store != nil {
mustUpdateMemberAttrInStore(c.store, m)
}
if c.be != nil {
mustSaveMemberToBackend(c.be, m)
}
return
}
_, ok := c.removed[id]
if ok {
plog.Warningf("skipped updating attributes of removed member %s", id)
} else {
if !ok {
plog.Panicf("error updating attributes of unknown member %s", id)
}
// TODO: update store in this function
return false
plog.Warningf("skipped updating attributes of removed member %s", id)
}
func (c *RaftCluster) UpdateRaftAttributes(id types.ID, raftAttr RaftAttributes) {
@@ -335,6 +344,8 @@ func (c *RaftCluster) UpdateRaftAttributes(id types.ID, raftAttr RaftAttributes)
if c.be != nil {
mustSaveMemberToBackend(c.be, c.members[id])
}
plog.Noticef("updated member %s %v in cluster %s", id, raftAttr.PeerURLs, c.id)
}
func (c *RaftCluster) Version() *semver.Version {
@@ -346,7 +357,7 @@ func (c *RaftCluster) Version() *semver.Version {
return semver.Must(semver.NewVersion(c.version.String()))
}
func (c *RaftCluster) SetVersion(ver *semver.Version) {
func (c *RaftCluster) SetVersion(ver *semver.Version, onSet func(*semver.Version)) {
c.Lock()
defer c.Unlock()
if c.version != nil {
@@ -356,6 +367,13 @@ func (c *RaftCluster) SetVersion(ver *semver.Version) {
}
c.version = ver
mustDetectDowngrade(c.version)
if c.store != nil {
mustSaveClusterVersionToStore(c.store, ver)
}
if c.be != nil {
mustSaveClusterVersionToBackend(c.be, ver)
}
onSet(ver)
}
func (c *RaftCluster) IsReadyToAddNewMember() bool {
@@ -371,7 +389,7 @@ func (c *RaftCluster) IsReadyToAddNewMember() bool {
if nstarted == 1 && nmembers == 2 {
// a case of adding a new node to 1-member cluster for restoring cluster data
// https://github.com/coreos/etcd/blob/master/Documentation/admin_guide.md#restoring-the-cluster
// https://github.com/coreos/etcd/blob/master/Documentation/v2/admin_guide.md#restoring-the-cluster
plog.Debugf("The number of started member is 1. This cluster can accept add member request.")
return true

Some files were not shown because too many files have changed in this diff.