Update github.com/coreos/etcd to v3.3.15
Change-Id: I1b16ca712238219d082427c75dd6bc404794abbf

go.mod (4)

@@ -38,7 +38,7 @@ require (
 	github.com/containerd/typeurl v0.0.0-20190228175220-2a93cfde8c20 // indirect
 	github.com/containernetworking/cni v0.7.1
 	github.com/coredns/corefile-migration v1.0.2
-	github.com/coreos/etcd v3.3.13+incompatible
+	github.com/coreos/etcd v3.3.15+incompatible
 	github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7
 	github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea
 	github.com/coreos/rkt v1.30.0 // indirect
@@ -229,7 +229,7 @@ replace (
 	github.com/containernetworking/cni => github.com/containernetworking/cni v0.7.1
 	github.com/coredns/corefile-migration => github.com/coredns/corefile-migration v1.0.2
 	github.com/coreos/bbolt => github.com/coreos/bbolt v1.3.1-coreos.6
-	github.com/coreos/etcd => github.com/coreos/etcd v3.3.13+incompatible
+	github.com/coreos/etcd => github.com/coreos/etcd v3.3.15+incompatible
 	github.com/coreos/go-etcd => github.com/coreos/go-etcd v2.0.0+incompatible
 	github.com/coreos/go-oidc => github.com/coreos/go-oidc v0.0.0-20180117170138-065b426bd416
 	github.com/coreos/go-semver => github.com/coreos/go-semver v0.3.0

go.sum (4)

@@ -95,8 +95,8 @@ github.com/coredns/corefile-migration v1.0.2 h1:kQga1ATFIZdkBtU6c/oJdtASLcCRkDh3
 github.com/coredns/corefile-migration v1.0.2/go.mod h1:OFwBp/Wc9dJt5cAZzHWMNhK1r5L0p0jDwIBc6j8NC8E=
 github.com/coreos/bbolt v1.3.1-coreos.6 h1:uTXKg9gY70s9jMAKdfljFQcuh4e/BXOM+V+d00KFj3A=
 github.com/coreos/bbolt v1.3.1-coreos.6/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
-github.com/coreos/etcd v3.3.13+incompatible h1:8F3hqu9fGYLBifCmRCJsicFqDx/D68Rt3q1JMazcgBQ=
-github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
+github.com/coreos/etcd v3.3.15+incompatible h1:+9RjdC18gMxNQVvSiXvObLu29mOFmkgdsB4cRTlV+EE=
+github.com/coreos/etcd v3.3.15+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
 github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
 github.com/coreos/go-oidc v0.0.0-20180117170138-065b426bd416 h1:X+JQSgXg3CcxgcBoMAqU8NoS0fch8zHxjiKWcXclxaI=
 github.com/coreos/go-oidc v0.0.0-20180117170138-065b426bd416/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=

staging/src/k8s.io/apiextensions-apiserver/go.mod

@@ -5,7 +5,7 @@ module k8s.io/apiextensions-apiserver
 go 1.12

 require (
-	github.com/coreos/etcd v3.3.13+incompatible
+	github.com/coreos/etcd v3.3.15+incompatible
 	github.com/emicklei/go-restful v2.9.5+incompatible
 	github.com/go-openapi/errors v0.19.2
 	github.com/go-openapi/spec v0.19.2

staging/src/k8s.io/apiextensions-apiserver/go.sum

@@ -34,8 +34,8 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk
 github.com/coreos/bbolt v1.3.1-coreos.6 h1:uTXKg9gY70s9jMAKdfljFQcuh4e/BXOM+V+d00KFj3A=
 github.com/coreos/bbolt v1.3.1-coreos.6/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
 github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
-github.com/coreos/etcd v3.3.13+incompatible h1:8F3hqu9fGYLBifCmRCJsicFqDx/D68Rt3q1JMazcgBQ=
-github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
+github.com/coreos/etcd v3.3.15+incompatible h1:+9RjdC18gMxNQVvSiXvObLu29mOFmkgdsB4cRTlV+EE=
+github.com/coreos/etcd v3.3.15+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
 github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
 github.com/coreos/go-oidc v0.0.0-20180117170138-065b426bd416/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
 github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
@@ -64,8 +64,6 @@ github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLi
 github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
 github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
 github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
-github.com/ghodss/yaml v0.0.0-20180820084758-c7ce16629ff4 h1:bRzFpEzvausOAt4va+I/22BZ1vXDtERngp0BNYDKej0=
-github.com/ghodss/yaml v0.0.0-20180820084758-c7ce16629ff4/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
 github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=
 github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8 h1:DujepqpGd1hyOd7aW59XpK7Qymp8iy83xq74fLr21is=
 github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=

staging/src/k8s.io/apiserver/go.mod

@@ -7,7 +7,7 @@ go 1.12
 require (
 	github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 // indirect
 	github.com/coreos/bbolt v1.3.1-coreos.6 // indirect
-	github.com/coreos/etcd v3.3.13+incompatible
+	github.com/coreos/etcd v3.3.15+incompatible
 	github.com/coreos/go-oidc v0.0.0-20180117170138-065b426bd416
 	github.com/coreos/go-semver v0.3.0 // indirect
 	github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7
@@ -15,7 +15,6 @@ require (
 	github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0
 	github.com/emicklei/go-restful v2.9.5+incompatible
 	github.com/evanphx/json-patch v4.2.0+incompatible
-	github.com/ghodss/yaml v0.0.0-20180820084758-c7ce16629ff4 // indirect
 	github.com/go-openapi/spec v0.19.2
 	github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d
 	github.com/google/go-cmp v0.3.0

staging/src/k8s.io/apiserver/go.sum (6, generated)

@@ -26,8 +26,8 @@ github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnweb
 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
 github.com/coreos/bbolt v1.3.1-coreos.6 h1:uTXKg9gY70s9jMAKdfljFQcuh4e/BXOM+V+d00KFj3A=
 github.com/coreos/bbolt v1.3.1-coreos.6/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
-github.com/coreos/etcd v3.3.13+incompatible h1:8F3hqu9fGYLBifCmRCJsicFqDx/D68Rt3q1JMazcgBQ=
-github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
+github.com/coreos/etcd v3.3.15+incompatible h1:+9RjdC18gMxNQVvSiXvObLu29mOFmkgdsB4cRTlV+EE=
+github.com/coreos/etcd v3.3.15+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
 github.com/coreos/go-oidc v0.0.0-20180117170138-065b426bd416 h1:X+JQSgXg3CcxgcBoMAqU8NoS0fch8zHxjiKWcXclxaI=
 github.com/coreos/go-oidc v0.0.0-20180117170138-065b426bd416/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
 github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM=
@@ -54,8 +54,6 @@ github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLi
 github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
 github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
 github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
-github.com/ghodss/yaml v0.0.0-20180820084758-c7ce16629ff4 h1:bRzFpEzvausOAt4va+I/22BZ1vXDtERngp0BNYDKej0=
-github.com/ghodss/yaml v0.0.0-20180820084758-c7ce16629ff4/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
 github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
 github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
 github.com/go-openapi/jsonpointer v0.19.2 h1:A9+F4Dc/MCNB5jibxf6rRvOvR/iFgQdyNx9eIhnGqq0=

staging/src/k8s.io/kube-aggregator/go.mod

@@ -7,6 +7,7 @@ go 1.12
 require (
 	github.com/davecgh/go-spew v1.1.1
 	github.com/emicklei/go-restful v2.9.5+incompatible
+	github.com/ghodss/yaml v0.0.0-20180820084758-c7ce16629ff4 // indirect
 	github.com/go-openapi/spec v0.19.2
 	github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d
 	github.com/prometheus/client_golang v0.9.2

staging/src/k8s.io/kube-aggregator/go.sum (4, generated)

@@ -28,8 +28,8 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk
 github.com/coreos/bbolt v1.3.1-coreos.6 h1:uTXKg9gY70s9jMAKdfljFQcuh4e/BXOM+V+d00KFj3A=
 github.com/coreos/bbolt v1.3.1-coreos.6/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
 github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
-github.com/coreos/etcd v3.3.13+incompatible h1:8F3hqu9fGYLBifCmRCJsicFqDx/D68Rt3q1JMazcgBQ=
-github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
+github.com/coreos/etcd v3.3.15+incompatible h1:+9RjdC18gMxNQVvSiXvObLu29mOFmkgdsB4cRTlV+EE=
+github.com/coreos/etcd v3.3.15+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
 github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
 github.com/coreos/go-oidc v0.0.0-20180117170138-065b426bd416/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
 github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=

staging/src/k8s.io/legacy-cloud-providers/go.sum (3, generated)

@@ -36,7 +36,7 @@ github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24
 github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
 github.com/coreos/bbolt v1.3.1-coreos.6/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
-github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
+github.com/coreos/etcd v3.3.15+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
 github.com/coreos/go-oidc v0.0.0-20180117170138-065b426bd416/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
 github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
 github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
@@ -59,7 +59,6 @@ github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLi
 github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
 github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
 github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
-github.com/ghodss/yaml v0.0.0-20180820084758-c7ce16629ff4/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
 github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
 github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
 github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=

staging/src/k8s.io/sample-apiserver/go.sum (6, generated)

@@ -28,8 +28,8 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk
 github.com/coreos/bbolt v1.3.1-coreos.6 h1:uTXKg9gY70s9jMAKdfljFQcuh4e/BXOM+V+d00KFj3A=
 github.com/coreos/bbolt v1.3.1-coreos.6/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
 github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
-github.com/coreos/etcd v3.3.13+incompatible h1:8F3hqu9fGYLBifCmRCJsicFqDx/D68Rt3q1JMazcgBQ=
-github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
+github.com/coreos/etcd v3.3.15+incompatible h1:+9RjdC18gMxNQVvSiXvObLu29mOFmkgdsB4cRTlV+EE=
+github.com/coreos/etcd v3.3.15+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
 github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
 github.com/coreos/go-oidc v0.0.0-20180117170138-065b426bd416/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
 github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
@@ -57,8 +57,6 @@ github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLi
 github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
 github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
 github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
-github.com/ghodss/yaml v0.0.0-20180820084758-c7ce16629ff4 h1:bRzFpEzvausOAt4va+I/22BZ1vXDtERngp0BNYDKej0=
-github.com/ghodss/yaml v0.0.0-20180820084758-c7ce16629ff4/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
 github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
 github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
 github.com/go-openapi/jsonpointer v0.19.2 h1:A9+F4Dc/MCNB5jibxf6rRvOvR/iFgQdyNx9eIhnGqq0=

vendor/BUILD (2, vendored)

@@ -132,6 +132,7 @@ filegroup(
         "//vendor/github.com/coreos/etcd/pkg/runtime:all-srcs",
         "//vendor/github.com/coreos/etcd/pkg/schedule:all-srcs",
         "//vendor/github.com/coreos/etcd/pkg/srv:all-srcs",
+        "//vendor/github.com/coreos/etcd/pkg/systemd:all-srcs",
         "//vendor/github.com/coreos/etcd/pkg/testutil:all-srcs",
         "//vendor/github.com/coreos/etcd/pkg/tlsutil:all-srcs",
         "//vendor/github.com/coreos/etcd/pkg/transport:all-srcs",
@@ -440,6 +441,7 @@ filegroup(
         "//vendor/google.golang.org/api/pubsub/v1:all-srcs",
         "//vendor/google.golang.org/api/tpu/v1:all-srcs",
         "//vendor/google.golang.org/appengine:all-srcs",
+        "//vendor/google.golang.org/genproto/googleapis/api/annotations:all-srcs",
         "//vendor/google.golang.org/genproto/googleapis/rpc/status:all-srcs",
         "//vendor/google.golang.org/grpc:all-srcs",
         "//vendor/gopkg.in/fsnotify.v1:all-srcs",

vendor/github.com/coreos/etcd/auth/authpb/auth.pb.go (447, generated, vendored)

@@ -1,29 +1,16 @@
 // Code generated by protoc-gen-gogo. DO NOT EDIT.
 // source: auth.proto

-/*
-	Package authpb is a generated protocol buffer package.
-
-	It is generated from these files:
-		auth.proto
-
-	It has these top-level messages:
-		User
-		Permission
-		Role
-*/
 package authpb

 import (
-	"fmt"
-
-	proto "github.com/golang/protobuf/proto"
-
+	fmt "fmt"
+	io "io"
 	math "math"
+	math_bits "math/bits"

 	_ "github.com/gogo/protobuf/gogoproto"
-
-	io "io"
+	proto "github.com/golang/protobuf/proto"
 )

 // Reference imports to suppress errors if they are not otherwise used.
@@ -50,6 +37,7 @@ var Permission_Type_name = map[int32]string{
 	1: "WRITE",
 	2: "READWRITE",
 }
+
 var Permission_Type_value = map[string]int32{
 	"READ": 0,
 	"WRITE": 1,
@@ -59,53 +47,174 @@ var Permission_Type_value = map[string]int32{
|
|||||||
func (x Permission_Type) String() string {
|
func (x Permission_Type) String() string {
|
||||||
return proto.EnumName(Permission_Type_name, int32(x))
|
return proto.EnumName(Permission_Type_name, int32(x))
|
||||||
}
|
}
|
||||||
func (Permission_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptorAuth, []int{1, 0} }
|
|
||||||
|
func (Permission_Type) EnumDescriptor() ([]byte, []int) {
|
||||||
|
return fileDescriptor_8bbd6f3875b0e874, []int{1, 0}
|
||||||
|
}
|
||||||
|
|
||||||
// User is a single entry in the bucket authUsers
|
// User is a single entry in the bucket authUsers
|
||||||
type User struct {
|
type User struct {
|
||||||
Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
|
Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
|
||||||
Password []byte `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"`
|
Password []byte `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"`
|
||||||
Roles []string `protobuf:"bytes,3,rep,name=roles" json:"roles,omitempty"`
|
Roles []string `protobuf:"bytes,3,rep,name=roles,proto3" json:"roles,omitempty"`
|
||||||
|
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||||
|
XXX_unrecognized []byte `json:"-"`
|
||||||
|
XXX_sizecache int32 `json:"-"`
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *User) Reset() { *m = User{} }
|
func (m *User) Reset() { *m = User{} }
|
||||||
func (m *User) String() string { return proto.CompactTextString(m) }
|
func (m *User) String() string { return proto.CompactTextString(m) }
|
||||||
func (*User) ProtoMessage() {}
|
func (*User) ProtoMessage() {}
|
||||||
func (*User) Descriptor() ([]byte, []int) { return fileDescriptorAuth, []int{0} }
|
func (*User) Descriptor() ([]byte, []int) {
|
||||||
|
return fileDescriptor_8bbd6f3875b0e874, []int{0}
|
||||||
|
}
|
||||||
|
func (m *User) XXX_Unmarshal(b []byte) error {
|
||||||
|
return m.Unmarshal(b)
|
||||||
|
}
|
||||||
|
func (m *User) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||||
|
if deterministic {
|
||||||
|
return xxx_messageInfo_User.Marshal(b, m, deterministic)
|
||||||
|
} else {
|
||||||
|
b = b[:cap(b)]
|
||||||
|
n, err := m.MarshalToSizedBuffer(b)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return b[:n], nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
func (m *User) XXX_Merge(src proto.Message) {
|
||||||
|
xxx_messageInfo_User.Merge(m, src)
|
||||||
|
}
|
||||||
|
func (m *User) XXX_Size() int {
|
||||||
|
return m.Size()
|
||||||
|
}
|
||||||
|
func (m *User) XXX_DiscardUnknown() {
|
||||||
|
xxx_messageInfo_User.DiscardUnknown(m)
|
||||||
|
}
|
||||||
|
|
||||||
|
var xxx_messageInfo_User proto.InternalMessageInfo
|
||||||
|
|
||||||
// Permission is a single entity
|
// Permission is a single entity
|
||||||
type Permission struct {
|
type Permission struct {
|
||||||
PermType Permission_Type `protobuf:"varint,1,opt,name=permType,proto3,enum=authpb.Permission_Type" json:"permType,omitempty"`
|
PermType Permission_Type `protobuf:"varint,1,opt,name=permType,proto3,enum=authpb.Permission_Type" json:"permType,omitempty"`
|
||||||
Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
|
Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
|
||||||
RangeEnd []byte `protobuf:"bytes,3,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"`
|
RangeEnd []byte `protobuf:"bytes,3,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"`
|
||||||
|
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||||
|
XXX_unrecognized []byte `json:"-"`
|
||||||
|
XXX_sizecache int32 `json:"-"`
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *Permission) Reset() { *m = Permission{} }
|
func (m *Permission) Reset() { *m = Permission{} }
|
||||||
func (m *Permission) String() string { return proto.CompactTextString(m) }
|
func (m *Permission) String() string { return proto.CompactTextString(m) }
|
||||||
func (*Permission) ProtoMessage() {}
|
func (*Permission) ProtoMessage() {}
|
||||||
func (*Permission) Descriptor() ([]byte, []int) { return fileDescriptorAuth, []int{1} }
|
func (*Permission) Descriptor() ([]byte, []int) {
|
||||||
|
return fileDescriptor_8bbd6f3875b0e874, []int{1}
|
||||||
|
}
|
||||||
|
func (m *Permission) XXX_Unmarshal(b []byte) error {
|
||||||
|
return m.Unmarshal(b)
|
||||||
|
}
|
||||||
|
func (m *Permission) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||||
|
if deterministic {
|
||||||
|
return xxx_messageInfo_Permission.Marshal(b, m, deterministic)
|
||||||
|
} else {
|
||||||
|
b = b[:cap(b)]
|
||||||
|
n, err := m.MarshalToSizedBuffer(b)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return b[:n], nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
func (m *Permission) XXX_Merge(src proto.Message) {
|
||||||
|
xxx_messageInfo_Permission.Merge(m, src)
|
||||||
|
}
|
||||||
|
func (m *Permission) XXX_Size() int {
|
||||||
|
return m.Size()
|
||||||
|
}
|
||||||
|
func (m *Permission) XXX_DiscardUnknown() {
|
||||||
|
xxx_messageInfo_Permission.DiscardUnknown(m)
|
||||||
|
}
|
||||||
|
|
||||||
|
var xxx_messageInfo_Permission proto.InternalMessageInfo
|
||||||
|
|
||||||
// Role is a single entry in the bucket authRoles
|
// Role is a single entry in the bucket authRoles
|
||||||
type Role struct {
|
type Role struct {
|
||||||
Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
|
Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
|
||||||
KeyPermission []*Permission `protobuf:"bytes,2,rep,name=keyPermission" json:"keyPermission,omitempty"`
|
KeyPermission []*Permission `protobuf:"bytes,2,rep,name=keyPermission,proto3" json:"keyPermission,omitempty"`
|
||||||
|
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||||
|
XXX_unrecognized []byte `json:"-"`
|
||||||
|
XXX_sizecache int32 `json:"-"`
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *Role) Reset() { *m = Role{} }
|
func (m *Role) Reset() { *m = Role{} }
|
||||||
func (m *Role) String() string { return proto.CompactTextString(m) }
|
func (m *Role) String() string { return proto.CompactTextString(m) }
|
||||||
func (*Role) ProtoMessage() {}
|
func (*Role) ProtoMessage() {}
|
||||||
func (*Role) Descriptor() ([]byte, []int) { return fileDescriptorAuth, []int{2} }
|
func (*Role) Descriptor() ([]byte, []int) {
|
||||||
|
return fileDescriptor_8bbd6f3875b0e874, []int{2}
|
||||||
|
}
|
||||||
|
func (m *Role) XXX_Unmarshal(b []byte) error {
|
||||||
|
return m.Unmarshal(b)
|
||||||
|
}
|
||||||
|
func (m *Role) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||||
|
if deterministic {
|
||||||
|
return xxx_messageInfo_Role.Marshal(b, m, deterministic)
|
||||||
|
} else {
|
||||||
|
b = b[:cap(b)]
|
||||||
|
n, err := m.MarshalToSizedBuffer(b)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return b[:n], nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
func (m *Role) XXX_Merge(src proto.Message) {
|
||||||
|
xxx_messageInfo_Role.Merge(m, src)
|
||||||
|
}
|
||||||
|
func (m *Role) XXX_Size() int {
|
||||||
|
return m.Size()
|
||||||
|
}
|
||||||
|
func (m *Role) XXX_DiscardUnknown() {
|
||||||
|
xxx_messageInfo_Role.DiscardUnknown(m)
|
||||||
|
}
|
||||||
|
|
||||||
|
var xxx_messageInfo_Role proto.InternalMessageInfo
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
|
proto.RegisterEnum("authpb.Permission_Type", Permission_Type_name, Permission_Type_value)
|
||||||
proto.RegisterType((*User)(nil), "authpb.User")
|
proto.RegisterType((*User)(nil), "authpb.User")
|
||||||
proto.RegisterType((*Permission)(nil), "authpb.Permission")
|
proto.RegisterType((*Permission)(nil), "authpb.Permission")
|
||||||
proto.RegisterType((*Role)(nil), "authpb.Role")
|
proto.RegisterType((*Role)(nil), "authpb.Role")
|
||||||
proto.RegisterEnum("authpb.Permission_Type", Permission_Type_name, Permission_Type_value)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func init() { proto.RegisterFile("auth.proto", fileDescriptor_8bbd6f3875b0e874) }
|
||||||
|
|
||||||
|
var fileDescriptor_8bbd6f3875b0e874 = []byte{
|
||||||
|
// 288 bytes of a gzipped FileDescriptorProto
|
||||||
|
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0xc1, 0x4a, 0xc3, 0x30,
|
||||||
|
0x1c, 0xc6, 0x9b, 0xb6, 0x1b, 0xed, 0x5f, 0x27, 0x25, 0x0c, 0x0c, 0x13, 0x42, 0xe9, 0xa9, 0x78,
|
||||||
|
0xa8, 0xb0, 0x5d, 0xbc, 0x2a, 0xf6, 0x20, 0x78, 0x90, 0x50, 0xf1, 0x28, 0x1d, 0x0d, 0x75, 0x6c,
|
||||||
|
0x6d, 0x4a, 0x32, 0x91, 0xbe, 0x89, 0x07, 0x1f, 0x68, 0xc7, 0x3d, 0x82, 0xab, 0x2f, 0x22, 0x4d,
|
||||||
|
0x64, 0x43, 0xdc, 0xed, 0xfb, 0xbe, 0xff, 0x97, 0xe4, 0x97, 0x3f, 0x40, 0xfe, 0xb6, 0x7e, 0x4d,
|
||||||
|
0x1a, 0x29, 0xd6, 0x02, 0x0f, 0x7b, 0xdd, 0xcc, 0x27, 0xe3, 0x52, 0x94, 0x42, 0x47, 0x57, 0xbd,
|
||||||
|
0x32, 0xd3, 0xe8, 0x01, 0xdc, 0x27, 0xc5, 0x25, 0xc6, 0xe0, 0xd6, 0x79, 0xc5, 0x09, 0x0a, 0x51,
|
||||||
|
0x7c, 0xca, 0xb4, 0xc6, 0x13, 0xf0, 0x9a, 0x5c, 0xa9, 0x77, 0x21, 0x0b, 0x62, 0xeb, 0x7c, 0xef,
|
||||||
|
0xf1, 0x18, 0x06, 0x52, 0xac, 0xb8, 0x22, 0x4e, 0xe8, 0xc4, 0x3e, 0x33, 0x26, 0xfa, 0x44, 0x00,
|
||||||
|
0x8f, 0x5c, 0x56, 0x0b, 0xa5, 0x16, 0xa2, 0xc6, 0x33, 0xf0, 0x1a, 0x2e, 0xab, 0xac, 0x6d, 0xcc,
|
||||||
|
0xc5, 0x67, 0xd3, 0xf3, 0xc4, 0xd0, 0x24, 0x87, 0x56, 0xd2, 0x8f, 0xd9, 0xbe, 0x88, 0x03, 0x70,
|
||||||
|
0x96, 0xbc, 0xfd, 0x7d, 0xb0, 0x97, 0xf8, 0x02, 0x7c, 0x99, 0xd7, 0x25, 0x7f, 0xe1, 0x75, 0x41,
|
||||||
|
0x1c, 0x03, 0xa2, 0x83, 0xb4, 0x2e, 0xa2, 0x4b, 0x70, 0xf5, 0x31, 0x0f, 0x5c, 0x96, 0xde, 0xdc,
|
||||||
|
0x05, 0x16, 0xf6, 0x61, 0xf0, 0xcc, 0xee, 0xb3, 0x34, 0x40, 0x78, 0x04, 0x7e, 0x1f, 0x1a, 0x6b,
|
||||||
|
0x47, 0x19, 0xb8, 0x4c, 0xac, 0xf8, 0xd1, 0xcf, 0x5e, 0xc3, 0x68, 0xc9, 0xdb, 0x03, 0x16, 0xb1,
|
||||||
|
0x43, 0x27, 0x3e, 0x99, 0xe2, 0xff, 0xc0, 0xec, 0x6f, 0xf1, 0x96, 0x6c, 0x76, 0xd4, 0xda, 0xee,
|
||||||
|
0xa8, 0xb5, 0xe9, 0x28, 0xda, 0x76, 0x14, 0x7d, 0x75, 0x14, 0x7d, 0x7c, 0x53, 0x6b, 0x3e, 0xd4,
|
||||||
|
0x3b, 0x9e, 0xfd, 0x04, 0x00, 0x00, 0xff, 0xff, 0xcc, 0x76, 0x8d, 0x4f, 0x8f, 0x01, 0x00, 0x00,
|
||||||
|
}
|
||||||
|
|
||||||
func (m *User) Marshal() (dAtA []byte, err error) {
|
func (m *User) Marshal() (dAtA []byte, err error) {
|
||||||
size := m.Size()
|
size := m.Size()
|
||||||
dAtA = make([]byte, size)
|
dAtA = make([]byte, size)
|
||||||
n, err := m.MarshalTo(dAtA)
|
n, err := m.MarshalToSizedBuffer(dAtA[:size])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -113,44 +222,49 @@ func (m *User) Marshal() (dAtA []byte, err error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (m *User) MarshalTo(dAtA []byte) (int, error) {
|
func (m *User) MarshalTo(dAtA []byte) (int, error) {
|
||||||
var i int
|
size := m.Size()
|
||||||
|
return m.MarshalToSizedBuffer(dAtA[:size])
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *User) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
||||||
|
i := len(dAtA)
|
||||||
_ = i
|
_ = i
|
||||||
var l int
|
var l int
|
||||||
_ = l
|
_ = l
|
||||||
if len(m.Name) > 0 {
|
if m.XXX_unrecognized != nil {
|
||||||
dAtA[i] = 0xa
|
i -= len(m.XXX_unrecognized)
|
||||||
i++
|
copy(dAtA[i:], m.XXX_unrecognized)
|
||||||
i = encodeVarintAuth(dAtA, i, uint64(len(m.Name)))
|
|
||||||
i += copy(dAtA[i:], m.Name)
|
|
||||||
}
|
|
||||||
if len(m.Password) > 0 {
|
|
||||||
dAtA[i] = 0x12
|
|
||||||
i++
|
|
||||||
i = encodeVarintAuth(dAtA, i, uint64(len(m.Password)))
|
|
||||||
i += copy(dAtA[i:], m.Password)
|
|
||||||
}
|
}
|
||||||
if len(m.Roles) > 0 {
|
if len(m.Roles) > 0 {
|
||||||
for _, s := range m.Roles {
|
for iNdEx := len(m.Roles) - 1; iNdEx >= 0; iNdEx-- {
|
||||||
|
i -= len(m.Roles[iNdEx])
|
||||||
|
copy(dAtA[i:], m.Roles[iNdEx])
|
||||||
|
i = encodeVarintAuth(dAtA, i, uint64(len(m.Roles[iNdEx])))
|
||||||
|
i--
|
||||||
dAtA[i] = 0x1a
|
dAtA[i] = 0x1a
|
||||||
i++
|
|
||||||
l = len(s)
|
|
||||||
for l >= 1<<7 {
|
|
||||||
dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
|
|
||||||
l >>= 7
|
|
||||||
i++
|
|
||||||
}
|
|
||||||
dAtA[i] = uint8(l)
|
|
||||||
i++
|
|
||||||
i += copy(dAtA[i:], s)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return i, nil
|
if len(m.Password) > 0 {
|
||||||
|
i -= len(m.Password)
|
||||||
|
copy(dAtA[i:], m.Password)
|
||||||
|
i = encodeVarintAuth(dAtA, i, uint64(len(m.Password)))
|
||||||
|
i--
|
||||||
|
dAtA[i] = 0x12
|
||||||
|
}
|
||||||
|
if len(m.Name) > 0 {
|
||||||
|
i -= len(m.Name)
|
||||||
|
copy(dAtA[i:], m.Name)
|
||||||
|
i = encodeVarintAuth(dAtA, i, uint64(len(m.Name)))
|
||||||
|
i--
|
||||||
|
dAtA[i] = 0xa
|
||||||
|
}
|
||||||
|
return len(dAtA) - i, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *Permission) Marshal() (dAtA []byte, err error) {
|
func (m *Permission) Marshal() (dAtA []byte, err error) {
|
||||||
size := m.Size()
|
size := m.Size()
|
||||||
dAtA = make([]byte, size)
|
dAtA = make([]byte, size)
|
||||||
n, err := m.MarshalTo(dAtA)
|
n, err := m.MarshalToSizedBuffer(dAtA[:size])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -158,34 +272,45 @@ func (m *Permission) Marshal() (dAtA []byte, err error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (m *Permission) MarshalTo(dAtA []byte) (int, error) {
|
func (m *Permission) MarshalTo(dAtA []byte) (int, error) {
|
||||||
var i int
|
size := m.Size()
|
||||||
|
return m.MarshalToSizedBuffer(dAtA[:size])
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Permission) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
||||||
|
i := len(dAtA)
|
||||||
_ = i
|
_ = i
|
||||||
var l int
|
var l int
|
||||||
_ = l
|
_ = l
|
||||||
if m.PermType != 0 {
|
if m.XXX_unrecognized != nil {
|
||||||
dAtA[i] = 0x8
|
i -= len(m.XXX_unrecognized)
|
||||||
i++
|
copy(dAtA[i:], m.XXX_unrecognized)
|
||||||
i = encodeVarintAuth(dAtA, i, uint64(m.PermType))
|
|
||||||
}
|
|
||||||
if len(m.Key) > 0 {
|
|
||||||
dAtA[i] = 0x12
|
|
||||||
i++
|
|
||||||
i = encodeVarintAuth(dAtA, i, uint64(len(m.Key)))
|
|
||||||
i += copy(dAtA[i:], m.Key)
|
|
||||||
}
|
}
|
||||||
if len(m.RangeEnd) > 0 {
|
if len(m.RangeEnd) > 0 {
|
||||||
dAtA[i] = 0x1a
|
i -= len(m.RangeEnd)
|
||||||
i++
|
copy(dAtA[i:], m.RangeEnd)
|
||||||
i = encodeVarintAuth(dAtA, i, uint64(len(m.RangeEnd)))
|
i = encodeVarintAuth(dAtA, i, uint64(len(m.RangeEnd)))
|
||||||
i += copy(dAtA[i:], m.RangeEnd)
|
i--
|
||||||
|
dAtA[i] = 0x1a
|
||||||
}
|
}
|
||||||
return i, nil
|
if len(m.Key) > 0 {
|
||||||
|
i -= len(m.Key)
|
||||||
|
copy(dAtA[i:], m.Key)
|
||||||
|
i = encodeVarintAuth(dAtA, i, uint64(len(m.Key)))
|
||||||
|
i--
|
||||||
|
dAtA[i] = 0x12
|
||||||
|
}
|
||||||
|
if m.PermType != 0 {
|
||||||
|
i = encodeVarintAuth(dAtA, i, uint64(m.PermType))
|
||||||
|
i--
|
||||||
|
dAtA[i] = 0x8
|
||||||
|
}
|
||||||
|
return len(dAtA) - i, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *Role) Marshal() (dAtA []byte, err error) {
|
func (m *Role) Marshal() (dAtA []byte, err error) {
|
||||||
size := m.Size()
|
size := m.Size()
|
||||||
dAtA = make([]byte, size)
|
dAtA = make([]byte, size)
|
||||||
n, err := m.MarshalTo(dAtA)
|
n, err := m.MarshalToSizedBuffer(dAtA[:size])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -193,41 +318,58 @@ func (m *Role) Marshal() (dAtA []byte, err error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (m *Role) MarshalTo(dAtA []byte) (int, error) {
|
func (m *Role) MarshalTo(dAtA []byte) (int, error) {
|
||||||
var i int
|
size := m.Size()
|
||||||
|
return m.MarshalToSizedBuffer(dAtA[:size])
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Role) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
||||||
|
i := len(dAtA)
|
||||||
_ = i
|
_ = i
|
||||||
var l int
|
var l int
|
||||||
_ = l
|
_ = l
|
||||||
if len(m.Name) > 0 {
|
if m.XXX_unrecognized != nil {
|
||||||
dAtA[i] = 0xa
|
i -= len(m.XXX_unrecognized)
|
||||||
i++
|
copy(dAtA[i:], m.XXX_unrecognized)
|
||||||
i = encodeVarintAuth(dAtA, i, uint64(len(m.Name)))
|
|
||||||
i += copy(dAtA[i:], m.Name)
|
|
||||||
}
|
}
|
||||||
if len(m.KeyPermission) > 0 {
|
if len(m.KeyPermission) > 0 {
|
||||||
for _, msg := range m.KeyPermission {
|
for iNdEx := len(m.KeyPermission) - 1; iNdEx >= 0; iNdEx-- {
|
||||||
dAtA[i] = 0x12
|
{
|
||||||
i++
|
size, err := m.KeyPermission[iNdEx].MarshalToSizedBuffer(dAtA[:i])
|
||||||
i = encodeVarintAuth(dAtA, i, uint64(msg.Size()))
|
if err != nil {
|
||||||
n, err := msg.MarshalTo(dAtA[i:])
|
return 0, err
|
||||||
if err != nil {
|
}
|
||||||
return 0, err
|
i -= size
|
||||||
|
i = encodeVarintAuth(dAtA, i, uint64(size))
|
||||||
}
|
}
|
||||||
i += n
|
i--
|
||||||
|
dAtA[i] = 0x12
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return i, nil
|
if len(m.Name) > 0 {
|
||||||
|
i -= len(m.Name)
|
||||||
|
copy(dAtA[i:], m.Name)
|
||||||
|
i = encodeVarintAuth(dAtA, i, uint64(len(m.Name)))
|
||||||
|
i--
|
||||||
|
dAtA[i] = 0xa
|
||||||
|
}
|
||||||
|
return len(dAtA) - i, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func encodeVarintAuth(dAtA []byte, offset int, v uint64) int {
|
func encodeVarintAuth(dAtA []byte, offset int, v uint64) int {
|
||||||
|
offset -= sovAuth(v)
|
||||||
|
base := offset
|
||||||
for v >= 1<<7 {
|
for v >= 1<<7 {
|
||||||
dAtA[offset] = uint8(v&0x7f | 0x80)
|
dAtA[offset] = uint8(v&0x7f | 0x80)
|
||||||
v >>= 7
|
v >>= 7
|
||||||
offset++
|
offset++
|
||||||
}
|
}
|
||||||
dAtA[offset] = uint8(v)
|
dAtA[offset] = uint8(v)
|
||||||
return offset + 1
|
return base
|
||||||
}
|
}
|
||||||
func (m *User) Size() (n int) {
|
func (m *User) Size() (n int) {
|
||||||
|
if m == nil {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
var l int
|
var l int
|
||||||
_ = l
|
_ = l
|
||||||
l = len(m.Name)
|
l = len(m.Name)
|
||||||
@@ -244,10 +386,16 @@ func (m *User) Size() (n int) {
|
|||||||
n += 1 + l + sovAuth(uint64(l))
|
n += 1 + l + sovAuth(uint64(l))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
if m.XXX_unrecognized != nil {
|
||||||
|
n += len(m.XXX_unrecognized)
|
||||||
|
}
|
||||||
return n
|
return n
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *Permission) Size() (n int) {
|
func (m *Permission) Size() (n int) {
|
||||||
|
if m == nil {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
var l int
|
var l int
|
||||||
_ = l
|
_ = l
|
||||||
if m.PermType != 0 {
|
if m.PermType != 0 {
|
||||||
@@ -261,10 +409,16 @@ func (m *Permission) Size() (n int) {
|
|||||||
if l > 0 {
|
if l > 0 {
|
||||||
n += 1 + l + sovAuth(uint64(l))
|
n += 1 + l + sovAuth(uint64(l))
|
||||||
}
|
}
|
||||||
|
if m.XXX_unrecognized != nil {
|
||||||
|
n += len(m.XXX_unrecognized)
|
||||||
|
}
|
||||||
return n
|
return n
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *Role) Size() (n int) {
|
func (m *Role) Size() (n int) {
|
||||||
|
if m == nil {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
var l int
|
var l int
|
||||||
_ = l
|
_ = l
|
||||||
l = len(m.Name)
|
l = len(m.Name)
|
||||||
@@ -277,18 +431,14 @@ func (m *Role) Size() (n int) {
|
|||||||
n += 1 + l + sovAuth(uint64(l))
|
n += 1 + l + sovAuth(uint64(l))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
if m.XXX_unrecognized != nil {
|
||||||
|
n += len(m.XXX_unrecognized)
|
||||||
|
}
|
||||||
return n
|
return n
|
||||||
}
|
}
|
||||||
|
|
||||||
func sovAuth(x uint64) (n int) {
|
func sovAuth(x uint64) (n int) {
|
||||||
for {
|
return (math_bits.Len64(x|1) + 6) / 7
|
||||||
n++
|
|
||||||
x >>= 7
|
|
||||||
if x == 0 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return n
|
|
||||||
}
|
}
|
||||||
func sozAuth(x uint64) (n int) {
|
func sozAuth(x uint64) (n int) {
|
||||||
return sovAuth(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
return sovAuth(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
||||||
@@ -308,7 +458,7 @@ func (m *User) Unmarshal(dAtA []byte) error {
|
|||||||
}
|
}
|
||||||
b := dAtA[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
wire |= (uint64(b) & 0x7F) << shift
|
wire |= uint64(b&0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
@@ -336,7 +486,7 @@ func (m *User) Unmarshal(dAtA []byte) error {
|
|||||||
}
|
}
|
||||||
b := dAtA[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
byteLen |= (int(b) & 0x7F) << shift
|
byteLen |= int(b&0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
@@ -345,6 +495,9 @@ func (m *User) Unmarshal(dAtA []byte) error {
|
|||||||
return ErrInvalidLengthAuth
|
return ErrInvalidLengthAuth
|
||||||
}
|
}
|
||||||
postIndex := iNdEx + byteLen
|
postIndex := iNdEx + byteLen
|
||||||
|
if postIndex < 0 {
|
||||||
|
return ErrInvalidLengthAuth
|
||||||
|
}
|
||||||
if postIndex > l {
|
if postIndex > l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
@@ -367,7 +520,7 @@ func (m *User) Unmarshal(dAtA []byte) error {
|
|||||||
}
|
}
|
||||||
b := dAtA[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
byteLen |= (int(b) & 0x7F) << shift
|
byteLen |= int(b&0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
@@ -376,6 +529,9 @@ func (m *User) Unmarshal(dAtA []byte) error {
|
|||||||
return ErrInvalidLengthAuth
|
return ErrInvalidLengthAuth
|
||||||
}
|
}
|
||||||
postIndex := iNdEx + byteLen
|
postIndex := iNdEx + byteLen
|
||||||
|
if postIndex < 0 {
|
||||||
|
return ErrInvalidLengthAuth
|
||||||
|
}
|
||||||
if postIndex > l {
|
if postIndex > l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
@@ -398,7 +554,7 @@ func (m *User) Unmarshal(dAtA []byte) error {
|
|||||||
}
|
}
|
||||||
b := dAtA[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
stringLen |= (uint64(b) & 0x7F) << shift
|
stringLen |= uint64(b&0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
@@ -408,6 +564,9 @@ func (m *User) Unmarshal(dAtA []byte) error {
|
|||||||
return ErrInvalidLengthAuth
|
return ErrInvalidLengthAuth
|
||||||
}
|
}
|
||||||
postIndex := iNdEx + intStringLen
|
postIndex := iNdEx + intStringLen
|
||||||
|
if postIndex < 0 {
|
||||||
|
return ErrInvalidLengthAuth
|
||||||
|
}
|
||||||
if postIndex > l {
|
if postIndex > l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
@@ -422,9 +581,13 @@ func (m *User) Unmarshal(dAtA []byte) error {
|
|||||||
if skippy < 0 {
|
if skippy < 0 {
|
||||||
return ErrInvalidLengthAuth
|
return ErrInvalidLengthAuth
|
||||||
}
|
}
|
||||||
|
if (iNdEx + skippy) < 0 {
|
||||||
|
return ErrInvalidLengthAuth
|
||||||
|
}
|
||||||
if (iNdEx + skippy) > l {
|
if (iNdEx + skippy) > l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
|
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
|
||||||
iNdEx += skippy
|
iNdEx += skippy
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -449,7 +612,7 @@ func (m *Permission) Unmarshal(dAtA []byte) error {
|
|||||||
}
|
}
|
||||||
b := dAtA[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
wire |= (uint64(b) & 0x7F) << shift
|
wire |= uint64(b&0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
@@ -477,7 +640,7 @@ func (m *Permission) Unmarshal(dAtA []byte) error {
|
|||||||
}
|
}
|
||||||
b := dAtA[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
m.PermType |= (Permission_Type(b) & 0x7F) << shift
|
m.PermType |= Permission_Type(b&0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
@@ -496,7 +659,7 @@ func (m *Permission) Unmarshal(dAtA []byte) error {
|
|||||||
}
|
}
|
||||||
b := dAtA[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
byteLen |= (int(b) & 0x7F) << shift
|
byteLen |= int(b&0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
@@ -505,6 +668,9 @@ func (m *Permission) Unmarshal(dAtA []byte) error {
|
|||||||
return ErrInvalidLengthAuth
|
return ErrInvalidLengthAuth
|
||||||
}
|
}
|
||||||
postIndex := iNdEx + byteLen
|
postIndex := iNdEx + byteLen
|
||||||
|
if postIndex < 0 {
|
||||||
|
return ErrInvalidLengthAuth
|
||||||
|
}
|
||||||
if postIndex > l {
|
if postIndex > l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
@@ -527,7 +693,7 @@ func (m *Permission) Unmarshal(dAtA []byte) error {
|
|||||||
}
|
}
|
||||||
b := dAtA[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
byteLen |= (int(b) & 0x7F) << shift
|
byteLen |= int(b&0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
@@ -536,6 +702,9 @@ func (m *Permission) Unmarshal(dAtA []byte) error {
|
|||||||
return ErrInvalidLengthAuth
|
return ErrInvalidLengthAuth
|
||||||
}
|
}
|
||||||
postIndex := iNdEx + byteLen
|
postIndex := iNdEx + byteLen
|
||||||
|
if postIndex < 0 {
|
||||||
|
return ErrInvalidLengthAuth
|
||||||
|
}
|
||||||
if postIndex > l {
|
if postIndex > l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
@@ -553,9 +722,13 @@ func (m *Permission) Unmarshal(dAtA []byte) error {
|
|||||||
if skippy < 0 {
|
if skippy < 0 {
|
||||||
return ErrInvalidLengthAuth
|
return ErrInvalidLengthAuth
|
||||||
}
|
}
|
||||||
|
if (iNdEx + skippy) < 0 {
|
||||||
|
return ErrInvalidLengthAuth
|
||||||
|
}
|
||||||
if (iNdEx + skippy) > l {
|
if (iNdEx + skippy) > l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
|
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
|
||||||
iNdEx += skippy
|
iNdEx += skippy
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -580,7 +753,7 @@ func (m *Role) Unmarshal(dAtA []byte) error {
|
|||||||
}
|
}
|
||||||
b := dAtA[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
wire |= (uint64(b) & 0x7F) << shift
|
wire |= uint64(b&0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
@@ -608,7 +781,7 @@ func (m *Role) Unmarshal(dAtA []byte) error {
|
|||||||
}
|
}
|
||||||
b := dAtA[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
byteLen |= (int(b) & 0x7F) << shift
|
byteLen |= int(b&0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
@@ -617,6 +790,9 @@ func (m *Role) Unmarshal(dAtA []byte) error {
|
|||||||
return ErrInvalidLengthAuth
|
return ErrInvalidLengthAuth
|
||||||
}
|
}
|
||||||
postIndex := iNdEx + byteLen
|
postIndex := iNdEx + byteLen
|
||||||
|
if postIndex < 0 {
|
||||||
|
return ErrInvalidLengthAuth
|
||||||
|
}
|
||||||
if postIndex > l {
|
if postIndex > l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
@@ -639,7 +815,7 @@ func (m *Role) Unmarshal(dAtA []byte) error {
|
|||||||
}
|
}
|
||||||
b := dAtA[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
msglen |= (int(b) & 0x7F) << shift
|
msglen |= int(b&0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
@@ -648,6 +824,9 @@ func (m *Role) Unmarshal(dAtA []byte) error {
|
|||||||
return ErrInvalidLengthAuth
|
return ErrInvalidLengthAuth
|
||||||
}
|
}
|
||||||
postIndex := iNdEx + msglen
|
postIndex := iNdEx + msglen
|
||||||
|
if postIndex < 0 {
|
||||||
|
return ErrInvalidLengthAuth
|
||||||
|
}
|
||||||
if postIndex > l {
|
if postIndex > l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
@@ -665,9 +844,13 @@ func (m *Role) Unmarshal(dAtA []byte) error {
|
|||||||
if skippy < 0 {
|
if skippy < 0 {
|
||||||
return ErrInvalidLengthAuth
|
 }
+if (iNdEx + skippy) < 0 {
+return ErrInvalidLengthAuth
+}
 if (iNdEx + skippy) > l {
 return io.ErrUnexpectedEOF
 }
+m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 iNdEx += skippy
 }
 }
@@ -731,10 +914,13 @@ func skipAuth(dAtA []byte) (n int, err error) {
 break
 }
 }
-iNdEx += length
 if length < 0 {
 return 0, ErrInvalidLengthAuth
 }
+iNdEx += length
+if iNdEx < 0 {
+return 0, ErrInvalidLengthAuth
+}
 return iNdEx, nil
 case 3:
 for {
@@ -763,6 +949,9 @@ func skipAuth(dAtA []byte) (n int, err error) {
 return 0, err
 }
 iNdEx = start + next
+if iNdEx < 0 {
+return 0, ErrInvalidLengthAuth
+}
 }
 return iNdEx, nil
 case 4:
@@ -781,27 +970,3 @@ var (
 ErrInvalidLengthAuth = fmt.Errorf("proto: negative length found during unmarshaling")
 ErrIntOverflowAuth = fmt.Errorf("proto: integer overflow")
 )
-
-func init() { proto.RegisterFile("auth.proto", fileDescriptorAuth) }
-
-var fileDescriptorAuth = []byte{
-// 288 bytes of a gzipped FileDescriptorProto
-0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0xc1, 0x4a, 0xc3, 0x30,
-0x1c, 0xc6, 0x9b, 0xb6, 0x1b, 0xed, 0x5f, 0x27, 0x25, 0x0c, 0x0c, 0x13, 0x42, 0xe9, 0xa9, 0x78,
-0xa8, 0xb0, 0x5d, 0xbc, 0x2a, 0xf6, 0x20, 0x78, 0x90, 0x50, 0xf1, 0x28, 0x1d, 0x0d, 0x75, 0x6c,
-0x6d, 0x4a, 0x32, 0x91, 0xbe, 0x89, 0x07, 0x1f, 0x68, 0xc7, 0x3d, 0x82, 0xab, 0x2f, 0x22, 0x4d,
-0x64, 0x43, 0xdc, 0xed, 0xfb, 0xbe, 0xff, 0x97, 0xe4, 0x97, 0x3f, 0x40, 0xfe, 0xb6, 0x7e, 0x4d,
-0x1a, 0x29, 0xd6, 0x02, 0x0f, 0x7b, 0xdd, 0xcc, 0x27, 0xe3, 0x52, 0x94, 0x42, 0x47, 0x57, 0xbd,
-0x32, 0xd3, 0xe8, 0x01, 0xdc, 0x27, 0xc5, 0x25, 0xc6, 0xe0, 0xd6, 0x79, 0xc5, 0x09, 0x0a, 0x51,
-0x7c, 0xca, 0xb4, 0xc6, 0x13, 0xf0, 0x9a, 0x5c, 0xa9, 0x77, 0x21, 0x0b, 0x62, 0xeb, 0x7c, 0xef,
-0xf1, 0x18, 0x06, 0x52, 0xac, 0xb8, 0x22, 0x4e, 0xe8, 0xc4, 0x3e, 0x33, 0x26, 0xfa, 0x44, 0x00,
-0x8f, 0x5c, 0x56, 0x0b, 0xa5, 0x16, 0xa2, 0xc6, 0x33, 0xf0, 0x1a, 0x2e, 0xab, 0xac, 0x6d, 0xcc,
-0xc5, 0x67, 0xd3, 0xf3, 0xc4, 0xd0, 0x24, 0x87, 0x56, 0xd2, 0x8f, 0xd9, 0xbe, 0x88, 0x03, 0x70,
-0x96, 0xbc, 0xfd, 0x7d, 0xb0, 0x97, 0xf8, 0x02, 0x7c, 0x99, 0xd7, 0x25, 0x7f, 0xe1, 0x75, 0x41,
-0x1c, 0x03, 0xa2, 0x83, 0xb4, 0x2e, 0xa2, 0x4b, 0x70, 0xf5, 0x31, 0x0f, 0x5c, 0x96, 0xde, 0xdc,
-0x05, 0x16, 0xf6, 0x61, 0xf0, 0xcc, 0xee, 0xb3, 0x34, 0x40, 0x78, 0x04, 0x7e, 0x1f, 0x1a, 0x6b,
-0x47, 0x19, 0xb8, 0x4c, 0xac, 0xf8, 0xd1, 0xcf, 0x5e, 0xc3, 0x68, 0xc9, 0xdb, 0x03, 0x16, 0xb1,
-0x43, 0x27, 0x3e, 0x99, 0xe2, 0xff, 0xc0, 0xec, 0x6f, 0xf1, 0x96, 0x6c, 0x76, 0xd4, 0xda, 0xee,
-0xa8, 0xb5, 0xe9, 0x28, 0xda, 0x76, 0x14, 0x7d, 0x75, 0x14, 0x7d, 0x7c, 0x53, 0x6b, 0x3e, 0xd4,
-0x3b, 0x9e, 0xfd, 0x04, 0x00, 0x00, 0xff, 0xff, 0xcc, 0x76, 0x8d, 0x4f, 0x8f, 0x01, 0x00, 0x00,
-}
8 vendor/github.com/coreos/etcd/auth/range_perm_cache.go generated vendored
@@ -27,8 +27,8 @@ func getMergedPerms(tx backend.BatchTx, userName string) *unifiedRangePermission
 return nil
 }
 
-readPerms := &adt.IntervalTree{}
+readPerms := adt.NewIntervalTree()
-writePerms := &adt.IntervalTree{}
+writePerms := adt.NewIntervalTree()
 
 for _, roleName := range user.Roles {
 role := getRole(tx, roleName)
@@ -128,6 +128,6 @@ func (as *authStore) invalidateCachedPerm(userName string) {
 }
 
 type unifiedRangePermissions struct {
-readPerms *adt.IntervalTree
+readPerms adt.IntervalTree
-writePerms *adt.IntervalTree
+writePerms adt.IntervalTree
 }
14 vendor/github.com/coreos/etcd/clientv3/BUILD generated vendored
@@ -10,17 +10,17 @@ go_library(
 "compare.go",
 "config.go",
 "doc.go",
-"health_balancer.go",
 "kv.go",
 "lease.go",
 "logger.go",
 "maintenance.go",
 "op.go",
 "options.go",
-"ready_wait.go",
 "retry.go",
+"retry_interceptor.go",
 "sort.go",
 "txn.go",
+"utils.go",
 "watch.go",
 ],
 importmap = "k8s.io/kubernetes/vendor/github.com/coreos/etcd/clientv3",
@@ -28,15 +28,21 @@ go_library(
 visibility = ["//visibility:public"],
 deps = [
 "//vendor/github.com/coreos/etcd/auth/authpb:go_default_library",
+"//vendor/github.com/coreos/etcd/clientv3/balancer:go_default_library",
+"//vendor/github.com/coreos/etcd/clientv3/balancer/picker:go_default_library",
+"//vendor/github.com/coreos/etcd/clientv3/balancer/resolver/endpoint:go_default_library",
+"//vendor/github.com/coreos/etcd/clientv3/credentials:go_default_library",
 "//vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes:go_default_library",
 "//vendor/github.com/coreos/etcd/etcdserver/etcdserverpb:go_default_library",
 "//vendor/github.com/coreos/etcd/mvcc/mvccpb:go_default_library",
+"//vendor/github.com/coreos/etcd/pkg/logutil:go_default_library",
 "//vendor/github.com/coreos/etcd/pkg/types:go_default_library",
+"//vendor/github.com/google/uuid:go_default_library",
+"//vendor/go.uber.org/zap:go_default_library",
 "//vendor/google.golang.org/grpc:go_default_library",
 "//vendor/google.golang.org/grpc/codes:go_default_library",
 "//vendor/google.golang.org/grpc/credentials:go_default_library",
 "//vendor/google.golang.org/grpc/grpclog:go_default_library",
-"//vendor/google.golang.org/grpc/health/grpc_health_v1:go_default_library",
 "//vendor/google.golang.org/grpc/keepalive:go_default_library",
 "//vendor/google.golang.org/grpc/metadata:go_default_library",
 "//vendor/google.golang.org/grpc/status:go_default_library",
@@ -54,7 +60,9 @@ filegroup(
 name = "all-srcs",
 srcs = [
 ":package-srcs",
+"//vendor/github.com/coreos/etcd/clientv3/balancer:all-srcs",
 "//vendor/github.com/coreos/etcd/clientv3/concurrency:all-srcs",
+"//vendor/github.com/coreos/etcd/clientv3/credentials:all-srcs",
 "//vendor/github.com/coreos/etcd/clientv3/namespace:all-srcs",
 "//vendor/github.com/coreos/etcd/clientv3/naming:all-srcs",
 ],
85 vendor/github.com/coreos/etcd/clientv3/README.md generated vendored
@@ -1,85 +0,0 @@
# etcd/clientv3

[](https://godoc.org/github.com/coreos/etcd/clientv3)

`etcd/clientv3` is the official Go etcd client for v3.

## Install

```bash
go get github.com/coreos/etcd/clientv3
```

## Get started

Create client using `clientv3.New`:

```go
cli, err := clientv3.New(clientv3.Config{
Endpoints: []string{"localhost:2379", "localhost:22379", "localhost:32379"},
DialTimeout: 5 * time.Second,
})
if err != nil {
// handle error!
}
defer cli.Close()
```

etcd v3 uses [`gRPC`](http://www.grpc.io) for remote procedure calls. And `clientv3` uses
[`grpc-go`](https://github.com/grpc/grpc-go) to connect to etcd. Make sure to close the client after using it.
If the client is not closed, the connection will have leaky goroutines. To specify client request timeout,
pass `context.WithTimeout` to APIs:

```go
ctx, cancel := context.WithTimeout(context.Background(), timeout)
resp, err := cli.Put(ctx, "sample_key", "sample_value")
cancel()
if err != nil {
// handle error!
}
// use the response
```

etcd uses `cmd/vendor` directory to store external dependencies, which are
to be compiled into etcd release binaries. `client` can be imported without
vendoring. For full compatibility, it is recommended to vendor builds using
etcd's vendored packages, using tools like godep, as in
[vendor directories](https://golang.org/cmd/go/#hdr-Vendor_Directories).
For more detail, please read [Go vendor design](https://golang.org/s/go15vendor).

## Error Handling

etcd client returns 2 types of errors:

1. context error: canceled or deadline exceeded.
2. gRPC error: see [api/v3rpc/rpctypes](https://godoc.org/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes).

Here is the example code to handle client errors:

```go
resp, err := cli.Put(ctx, "", "")
if err != nil {
switch err {
case context.Canceled:
log.Fatalf("ctx is canceled by another routine: %v", err)
case context.DeadlineExceeded:
log.Fatalf("ctx is attached with a deadline is exceeded: %v", err)
case rpctypes.ErrEmptyKey:
log.Fatalf("client-side error: %v", err)
default:
log.Fatalf("bad cluster endpoints, which are not etcd servers: %v", err)
}
}
```

## Metrics

The etcd client optionally exposes RPC metrics through [go-grpc-prometheus](https://github.com/grpc-ecosystem/go-grpc-prometheus). See the [examples](https://github.com/coreos/etcd/blob/master/clientv3/example_metrics_test.go).

## Namespacing

The [namespace](https://godoc.org/github.com/coreos/etcd/clientv3/namespace) package provides `clientv3` interface wrappers to transparently isolate client requests to a user-defined prefix.

## Examples

More code examples can be found at [GoDoc](https://godoc.org/github.com/coreos/etcd/clientv3).
4 vendor/github.com/coreos/etcd/clientv3/auth.go generated vendored
@@ -216,8 +216,8 @@ func (auth *authenticator) close() {
 auth.conn.Close()
 }
 
-func newAuthenticator(endpoint string, opts []grpc.DialOption, c *Client) (*authenticator, error) {
-conn, err := grpc.Dial(endpoint, opts...)
+func newAuthenticator(ctx context.Context, target string, opts []grpc.DialOption, c *Client) (*authenticator, error) {
+conn, err := grpc.DialContext(ctx, target, opts...)
 if err != nil {
 return nil, err
 }
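The auth.go hunk above swaps grpc.Dial for grpc.DialContext, so the caller's context now bounds the authentication dial as well. A minimal standalone sketch (not etcd's code, just the stock grpc-go API) of how a deadline propagates through a blocking DialContext; the endpoint address is illustrative:

```go
package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
)

func main() {
	// With a blocking dial, DialContext gives up once the context expires,
	// instead of hanging indefinitely like a bare grpc.Dial with no deadline.
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	conn, err := grpc.DialContext(ctx, "127.0.0.1:2379", grpc.WithInsecure(), grpc.WithBlock())
	if err != nil {
		log.Fatalf("dial failed: %v", err) // typically a DeadlineExceeded error here
	}
	defer conn.Close()
}
```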
41 vendor/github.com/coreos/etcd/clientv3/balancer/BUILD generated vendored Normal file
@@ -0,0 +1,41 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
name = "go_default_library",
srcs = [
"balancer.go",
"utils.go",
],
importmap = "k8s.io/kubernetes/vendor/github.com/coreos/etcd/clientv3/balancer",
importpath = "github.com/coreos/etcd/clientv3/balancer",
visibility = ["//visibility:public"],
deps = [
"//vendor/github.com/coreos/etcd/clientv3/balancer/connectivity:go_default_library",
"//vendor/github.com/coreos/etcd/clientv3/balancer/picker:go_default_library",
"//vendor/go.uber.org/zap:go_default_library",
"//vendor/google.golang.org/grpc/balancer:go_default_library",
"//vendor/google.golang.org/grpc/connectivity:go_default_library",
"//vendor/google.golang.org/grpc/resolver:go_default_library",
"//vendor/google.golang.org/grpc/resolver/dns:go_default_library",
"//vendor/google.golang.org/grpc/resolver/passthrough:go_default_library",
],
)

filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)

filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//vendor/github.com/coreos/etcd/clientv3/balancer/connectivity:all-srcs",
"//vendor/github.com/coreos/etcd/clientv3/balancer/picker:all-srcs",
"//vendor/github.com/coreos/etcd/clientv3/balancer/resolver/endpoint:all-srcs",
],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)
293 vendor/github.com/coreos/etcd/clientv3/balancer/balancer.go generated vendored Normal file
@@ -0,0 +1,293 @@
// Copyright 2018 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package balancer implements client balancer.
package balancer

import (
"strconv"
"sync"
"time"

"github.com/coreos/etcd/clientv3/balancer/connectivity"
"github.com/coreos/etcd/clientv3/balancer/picker"

"go.uber.org/zap"
"google.golang.org/grpc/balancer"
grpcconnectivity "google.golang.org/grpc/connectivity"
"google.golang.org/grpc/resolver"
_ "google.golang.org/grpc/resolver/dns" // register DNS resolver
_ "google.golang.org/grpc/resolver/passthrough" // register passthrough resolver
)

// Config defines balancer configurations.
type Config struct {
// Policy configures balancer policy.
Policy picker.Policy

// Picker implements gRPC picker.
// Leave empty if "Policy" field is not custom.
// TODO: currently custom policy is not supported.
// Picker picker.Picker

// Name defines an additional name for balancer.
// Useful for balancer testing to avoid register conflicts.
// If empty, defaults to policy name.
Name string

// Logger configures balancer logging.
// If nil, logs are discarded.
Logger *zap.Logger
}

// RegisterBuilder creates and registers a builder. Since this function calls balancer.Register, it
// must be invoked at initialization time.
func RegisterBuilder(cfg Config) {
bb := &builder{cfg}
balancer.Register(bb)

bb.cfg.Logger.Debug(
"registered balancer",
zap.String("policy", bb.cfg.Policy.String()),
zap.String("name", bb.cfg.Name),
)
}

type builder struct {
cfg Config
}

// Build is called initially when creating "ccBalancerWrapper".
// "grpc.Dial" is called to this client connection.
// Then, resolved addresses will be handled via "HandleResolvedAddrs".
func (b *builder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer {
bb := &baseBalancer{
id: strconv.FormatInt(time.Now().UnixNano(), 36),
policy: b.cfg.Policy,
name: b.cfg.Name,
lg: b.cfg.Logger,

addrToSc: make(map[resolver.Address]balancer.SubConn),
scToAddr: make(map[balancer.SubConn]resolver.Address),
scToSt: make(map[balancer.SubConn]grpcconnectivity.State),

currentConn: nil,
connectivityRecorder: connectivity.New(b.cfg.Logger),

// initialize picker always returns "ErrNoSubConnAvailable"
picker: picker.NewErr(balancer.ErrNoSubConnAvailable),
}

// TODO: support multiple connections
bb.mu.Lock()
bb.currentConn = cc
bb.mu.Unlock()

bb.lg.Info(
"built balancer",
zap.String("balancer-id", bb.id),
zap.String("policy", bb.policy.String()),
zap.String("resolver-target", cc.Target()),
)
return bb
}

// Name implements "grpc/balancer.Builder" interface.
func (b *builder) Name() string { return b.cfg.Name }

// Balancer defines client balancer interface.
type Balancer interface {
// Balancer is called on specified client connection. Client initiates gRPC
// connection with "grpc.Dial(addr, grpc.WithBalancerName)", and then those resolved
// addresses are passed to "grpc/balancer.Balancer.HandleResolvedAddrs".
// For each resolved address, balancer calls "balancer.ClientConn.NewSubConn".
// "grpc/balancer.Balancer.HandleSubConnStateChange" is called when connectivity state
// changes, thus requires failover logic in this method.
balancer.Balancer

// Picker calls "Pick" for every client request.
picker.Picker
}

type baseBalancer struct {
id string
policy picker.Policy
name string
lg *zap.Logger

mu sync.RWMutex

addrToSc map[resolver.Address]balancer.SubConn
scToAddr map[balancer.SubConn]resolver.Address
scToSt map[balancer.SubConn]grpcconnectivity.State

currentConn balancer.ClientConn
connectivityRecorder connectivity.Recorder

picker picker.Picker
}

// HandleResolvedAddrs implements "grpc/balancer.Balancer" interface.
// gRPC sends initial or updated resolved addresses from "Build".
func (bb *baseBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) {
if err != nil {
bb.lg.Warn("HandleResolvedAddrs called with error", zap.String("balancer-id", bb.id), zap.Error(err))
return
}
bb.lg.Info("resolved",
zap.String("picker", bb.picker.String()),
zap.String("balancer-id", bb.id),
zap.Strings("addresses", addrsToStrings(addrs)),
)

bb.mu.Lock()
defer bb.mu.Unlock()

resolved := make(map[resolver.Address]struct{})
for _, addr := range addrs {
resolved[addr] = struct{}{}
if _, ok := bb.addrToSc[addr]; !ok {
sc, err := bb.currentConn.NewSubConn([]resolver.Address{addr}, balancer.NewSubConnOptions{})
if err != nil {
bb.lg.Warn("NewSubConn failed", zap.String("picker", bb.picker.String()), zap.String("balancer-id", bb.id), zap.Error(err), zap.String("address", addr.Addr))
continue
}
bb.lg.Info("created subconn", zap.String("address", addr.Addr))
bb.addrToSc[addr] = sc
bb.scToAddr[sc] = addr
bb.scToSt[sc] = grpcconnectivity.Idle
sc.Connect()
}
}

for addr, sc := range bb.addrToSc {
if _, ok := resolved[addr]; !ok {
// was removed by resolver or failed to create subconn
bb.currentConn.RemoveSubConn(sc)
delete(bb.addrToSc, addr)

bb.lg.Info(
"removed subconn",
zap.String("picker", bb.picker.String()),
zap.String("balancer-id", bb.id),
zap.String("address", addr.Addr),
zap.String("subconn", scToString(sc)),
)

// Keep the state of this sc in bb.scToSt until sc's state becomes Shutdown.
// The entry will be deleted in HandleSubConnStateChange.
// (DO NOT) delete(bb.scToAddr, sc)
// (DO NOT) delete(bb.scToSt, sc)
}
}
}

// HandleSubConnStateChange implements "grpc/balancer.Balancer" interface.
func (bb *baseBalancer) HandleSubConnStateChange(sc balancer.SubConn, s grpcconnectivity.State) {
bb.mu.Lock()
defer bb.mu.Unlock()

old, ok := bb.scToSt[sc]
if !ok {
bb.lg.Warn(
"state change for an unknown subconn",
zap.String("picker", bb.picker.String()),
zap.String("balancer-id", bb.id),
zap.String("subconn", scToString(sc)),
zap.Int("subconn-size", len(bb.scToAddr)),
zap.String("state", s.String()),
)
return
}

bb.lg.Info(
"state changed",
zap.String("picker", bb.picker.String()),
zap.String("balancer-id", bb.id),
zap.Bool("connected", s == grpcconnectivity.Ready),
zap.String("subconn", scToString(sc)),
zap.Int("subconn-size", len(bb.scToAddr)),
zap.String("address", bb.scToAddr[sc].Addr),
zap.String("old-state", old.String()),
zap.String("new-state", s.String()),
)

bb.scToSt[sc] = s
switch s {
case grpcconnectivity.Idle:
sc.Connect()
case grpcconnectivity.Shutdown:
// When an address was removed by resolver, b called RemoveSubConn but
// kept the sc's state in scToSt. Remove state for this sc here.
delete(bb.scToAddr, sc)
delete(bb.scToSt, sc)
}

oldAggrState := bb.connectivityRecorder.GetCurrentState()
bb.connectivityRecorder.RecordTransition(old, s)

// Update balancer picker when one of the following happens:
// - this sc became ready from not-ready
// - this sc became not-ready from ready
// - the aggregated state of balancer became TransientFailure from non-TransientFailure
// - the aggregated state of balancer became non-TransientFailure from TransientFailure
if (s == grpcconnectivity.Ready) != (old == grpcconnectivity.Ready) ||
(bb.connectivityRecorder.GetCurrentState() == grpcconnectivity.TransientFailure) != (oldAggrState == grpcconnectivity.TransientFailure) {
bb.updatePicker()
}

bb.currentConn.UpdateBalancerState(bb.connectivityRecorder.GetCurrentState(), bb.picker)
}

func (bb *baseBalancer) updatePicker() {
if bb.connectivityRecorder.GetCurrentState() == grpcconnectivity.TransientFailure {
bb.picker = picker.NewErr(balancer.ErrTransientFailure)
bb.lg.Info(
"updated picker to transient error picker",
zap.String("picker", bb.picker.String()),
zap.String("balancer-id", bb.id),
zap.String("policy", bb.policy.String()),
)
return
}

// only pass ready subconns to picker
scToAddr := make(map[balancer.SubConn]resolver.Address)
for addr, sc := range bb.addrToSc {
if st, ok := bb.scToSt[sc]; ok && st == grpcconnectivity.Ready {
scToAddr[sc] = addr
}
}

bb.picker = picker.New(picker.Config{
Policy: bb.policy,
Logger: bb.lg,
SubConnToResolverAddress: scToAddr,
})
bb.lg.Info(
"updated picker",
zap.String("picker", bb.picker.String()),
zap.String("balancer-id", bb.id),
zap.String("policy", bb.policy.String()),
zap.Strings("subconn-ready", scsToStrings(scToAddr)),
zap.Int("subconn-size", len(scToAddr)),
)
}

// Close implements "grpc/balancer.Balancer" interface.
// Close is a nop because base balancer doesn't have internal state to clean up,
// and it doesn't need to call RemoveSubConn for the SubConns.
func (bb *baseBalancer) Close() {
// TODO
}
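RegisterBuilder above wraps balancer.Register, so the builder must be registered before any dial that refers to it by name; the client.go hunk later in this diff does that from an init function. A hedged sketch of that registration/selection flow, where the balancer name "example-roundrobin" and the target are illustrative rather than what clientv3 actually uses:

```go
package main

import (
	"github.com/coreos/etcd/clientv3/balancer"
	"github.com/coreos/etcd/clientv3/balancer/picker"
	"go.uber.org/zap"
	"google.golang.org/grpc"
)

func init() {
	// Register once, at init time, under an illustrative name.
	balancer.RegisterBuilder(balancer.Config{
		Policy: picker.RoundrobinBalanced,
		Name:   "example-roundrobin", // hypothetical name for this sketch
		Logger: zap.NewNop(),
	})
}

// dial selects the registered balancer by the same name at dial time.
func dial(target string) (*grpc.ClientConn, error) {
	return grpc.Dial(target, grpc.WithInsecure(), grpc.WithBalancerName("example-roundrobin"))
}

func main() {
	if conn, err := dial("127.0.0.1:2379"); err == nil {
		conn.Close()
	}
}
```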
27 vendor/github.com/coreos/etcd/clientv3/balancer/connectivity/BUILD generated vendored Normal file
@@ -0,0 +1,27 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
name = "go_default_library",
srcs = ["connectivity.go"],
importmap = "k8s.io/kubernetes/vendor/github.com/coreos/etcd/clientv3/balancer/connectivity",
importpath = "github.com/coreos/etcd/clientv3/balancer/connectivity",
visibility = ["//visibility:public"],
deps = [
"//vendor/go.uber.org/zap:go_default_library",
"//vendor/google.golang.org/grpc/connectivity:go_default_library",
],
)

filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)

filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)
93 vendor/github.com/coreos/etcd/clientv3/balancer/connectivity/connectivity.go generated vendored Normal file
@@ -0,0 +1,93 @@
// Copyright 2019 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package connectivity implements client connectivity operations.
package connectivity

import (
"sync"

"go.uber.org/zap"
"google.golang.org/grpc/connectivity"
)

// Recorder records gRPC connectivity.
type Recorder interface {
GetCurrentState() connectivity.State
RecordTransition(oldState, newState connectivity.State)
}

// New returns a new Recorder.
func New(lg *zap.Logger) Recorder {
return &recorder{lg: lg}
}

// recorder takes the connectivity states of multiple SubConns
// and returns one aggregated connectivity state.
// ref. https://github.com/grpc/grpc-go/blob/master/balancer/balancer.go
type recorder struct {
lg *zap.Logger

mu sync.RWMutex

cur connectivity.State

numReady uint64 // Number of addrConns in ready state.
numConnecting uint64 // Number of addrConns in connecting state.
numTransientFailure uint64 // Number of addrConns in transientFailure.
}

func (rc *recorder) GetCurrentState() (state connectivity.State) {
rc.mu.RLock()
defer rc.mu.RUnlock()
return rc.cur
}

// RecordTransition records state change happening in subConn and based on that
// it evaluates what aggregated state should be.
//
// - If at least one SubConn in Ready, the aggregated state is Ready;
// - Else if at least one SubConn in Connecting, the aggregated state is Connecting;
// - Else the aggregated state is TransientFailure.
//
// Idle and Shutdown are not considered.
//
// ref. https://github.com/grpc/grpc-go/blob/master/balancer/balancer.go
func (rc *recorder) RecordTransition(oldState, newState connectivity.State) {
rc.mu.Lock()
defer rc.mu.Unlock()

for idx, state := range []connectivity.State{oldState, newState} {
updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new.
switch state {
case connectivity.Ready:
rc.numReady += updateVal
case connectivity.Connecting:
rc.numConnecting += updateVal
case connectivity.TransientFailure:
rc.numTransientFailure += updateVal
default:
rc.lg.Warn("connectivity recorder received unknown state", zap.String("connectivity-state", state.String()))
}
}

switch { // must be exclusive, no overlap
case rc.numReady > 0:
rc.cur = connectivity.Ready
case rc.numConnecting > 0:
rc.cur = connectivity.Connecting
default:
rc.cur = connectivity.TransientFailure
}
}
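The aggregation rule documented on RecordTransition above (Ready wins over Connecting, which wins over TransientFailure; Idle and Shutdown are ignored) can be exercised directly. A small sketch using the Recorder exactly as defined in this file:

```go
package main

import (
	"fmt"

	"github.com/coreos/etcd/clientv3/balancer/connectivity"
	"go.uber.org/zap"
	grpcconnectivity "google.golang.org/grpc/connectivity"
)

func main() {
	rec := connectivity.New(zap.NewNop())

	// Two subconns start connecting: the aggregate state becomes Connecting.
	rec.RecordTransition(grpcconnectivity.Idle, grpcconnectivity.Connecting)
	rec.RecordTransition(grpcconnectivity.Idle, grpcconnectivity.Connecting)
	fmt.Println(rec.GetCurrentState()) // CONNECTING

	// One of them becomes Ready: Ready wins over Connecting.
	rec.RecordTransition(grpcconnectivity.Connecting, grpcconnectivity.Ready)
	fmt.Println(rec.GetCurrentState()) // READY
}
```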
34 vendor/github.com/coreos/etcd/clientv3/balancer/picker/BUILD generated vendored Normal file
@@ -0,0 +1,34 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
name = "go_default_library",
srcs = [
"doc.go",
"err.go",
"picker.go",
"roundrobin_balanced.go",
],
importmap = "k8s.io/kubernetes/vendor/github.com/coreos/etcd/clientv3/balancer/picker",
importpath = "github.com/coreos/etcd/clientv3/balancer/picker",
visibility = ["//visibility:public"],
deps = [
"//vendor/go.uber.org/zap:go_default_library",
"//vendor/go.uber.org/zap/zapcore:go_default_library",
"//vendor/google.golang.org/grpc/balancer:go_default_library",
"//vendor/google.golang.org/grpc/resolver:go_default_library",
],
)

filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)

filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)
16 vendor/github.com/coreos/etcd/clientv3/balancer/picker/doc.go generated vendored Normal file
@@ -0,0 +1,16 @@
// Copyright 2018 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package picker defines/implements client balancer picker policy.
package picker
39 vendor/github.com/coreos/etcd/clientv3/balancer/picker/err.go generated vendored Normal file
@@ -0,0 +1,39 @@
// Copyright 2018 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package picker

import (
"context"

"google.golang.org/grpc/balancer"
)

// NewErr returns a picker that always returns err on "Pick".
func NewErr(err error) Picker {
return &errPicker{p: Error, err: err}
}

type errPicker struct {
p Policy
err error
}

func (ep *errPicker) String() string {
return ep.p.String()
}

func (ep *errPicker) Pick(context.Context, balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
return nil, nil, ep.err
}
91 vendor/github.com/coreos/etcd/clientv3/balancer/picker/picker.go generated vendored Normal file
@@ -0,0 +1,91 @@
// Copyright 2018 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package picker

import (
"fmt"

"go.uber.org/zap"
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/resolver"
)

// Picker defines balancer Picker methods.
type Picker interface {
balancer.Picker
String() string
}

// Config defines picker configuration.
type Config struct {
// Policy specifies etcd clientv3's built in balancer policy.
Policy Policy

// Logger defines picker logging object.
Logger *zap.Logger

// SubConnToResolverAddress maps each gRPC sub-connection to an address.
// Basically, it is a list of addresses that the Picker can pick from.
SubConnToResolverAddress map[balancer.SubConn]resolver.Address
}

// Policy defines balancer picker policy.
type Policy uint8

const (
// Error is error picker policy.
Error Policy = iota

// RoundrobinBalanced balances loads over multiple endpoints
// and implements failover in roundrobin fashion.
RoundrobinBalanced

// Custom defines custom balancer picker.
// TODO: custom picker is not supported yet.
Custom
)

func (p Policy) String() string {
switch p {
case Error:
return "picker-error"

case RoundrobinBalanced:
return "picker-roundrobin-balanced"

case Custom:
panic("'custom' picker policy is not supported yet")

default:
panic(fmt.Errorf("invalid balancer picker policy (%d)", p))
}
}

// New creates a new Picker.
func New(cfg Config) Picker {
switch cfg.Policy {
case Error:
panic("'error' picker policy is not supported here; use 'picker.NewErr'")

case RoundrobinBalanced:
return newRoundrobinBalanced(cfg)

case Custom:
panic("'custom' picker policy is not supported yet")

default:
panic(fmt.Errorf("invalid balancer picker policy (%d)", cfg.Policy))
}
}
95 vendor/github.com/coreos/etcd/clientv3/balancer/picker/roundrobin_balanced.go generated vendored Normal file
@@ -0,0 +1,95 @@
// Copyright 2018 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package picker

import (
"context"
"sync"

"go.uber.org/zap"
"go.uber.org/zap/zapcore"
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/resolver"
)

// newRoundrobinBalanced returns a new roundrobin balanced picker.
func newRoundrobinBalanced(cfg Config) Picker {
scs := make([]balancer.SubConn, 0, len(cfg.SubConnToResolverAddress))
for sc := range cfg.SubConnToResolverAddress {
scs = append(scs, sc)
}
return &rrBalanced{
p: RoundrobinBalanced,
lg: cfg.Logger,
scs: scs,
scToAddr: cfg.SubConnToResolverAddress,
}
}

type rrBalanced struct {
p Policy

lg *zap.Logger

mu sync.RWMutex
next int
scs []balancer.SubConn
scToAddr map[balancer.SubConn]resolver.Address
}

func (rb *rrBalanced) String() string { return rb.p.String() }

// Pick is called for every client request.
func (rb *rrBalanced) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
rb.mu.RLock()
n := len(rb.scs)
rb.mu.RUnlock()
if n == 0 {
return nil, nil, balancer.ErrNoSubConnAvailable
}

rb.mu.Lock()
cur := rb.next
sc := rb.scs[cur]
picked := rb.scToAddr[sc].Addr
rb.next = (rb.next + 1) % len(rb.scs)
rb.mu.Unlock()

rb.lg.Debug(
"picked",
zap.String("picker", rb.p.String()),
zap.String("address", picked),
zap.Int("subconn-index", cur),
zap.Int("subconn-size", n),
)

doneFunc := func(info balancer.DoneInfo) {
// TODO: error handling?
fss := []zapcore.Field{
zap.Error(info.Err),
zap.String("picker", rb.p.String()),
zap.String("address", picked),
zap.Bool("success", info.Err == nil),
zap.Bool("bytes-sent", info.BytesSent),
zap.Bool("bytes-received", info.BytesReceived),
}
if info.Err == nil {
rb.lg.Debug("balancer done", fss...)
} else {
rb.lg.Warn("balancer failed", fss...)
}
}
return sc, doneFunc, nil
}
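The picker above rotates with `next = (next + 1) % len(scs)` under a mutex. A tiny standalone sketch of that same rotation logic, with no gRPC types involved (the endpoint strings are placeholders):

```go
package main

import "fmt"

// roundRobin cycles through a fixed set of endpoints the same way
// rrBalanced.Pick rotates over its ready subconns.
type roundRobin struct {
	next      int
	endpoints []string
}

func (r *roundRobin) pick() string {
	ep := r.endpoints[r.next]
	r.next = (r.next + 1) % len(r.endpoints)
	return ep
}

func main() {
	rr := &roundRobin{endpoints: []string{"a:2379", "b:2379", "c:2379"}}
	for i := 0; i < 5; i++ {
		fmt.Println(rr.pick()) // a, b, c, a, b
	}
}
```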
24 vendor/github.com/coreos/etcd/clientv3/balancer/resolver/endpoint/BUILD generated vendored Normal file
@@ -0,0 +1,24 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
name = "go_default_library",
srcs = ["endpoint.go"],
importmap = "k8s.io/kubernetes/vendor/github.com/coreos/etcd/clientv3/balancer/resolver/endpoint",
importpath = "github.com/coreos/etcd/clientv3/balancer/resolver/endpoint",
visibility = ["//visibility:public"],
deps = ["//vendor/google.golang.org/grpc/resolver:go_default_library"],
)

filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)

filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)
240 vendor/github.com/coreos/etcd/clientv3/balancer/resolver/endpoint/endpoint.go generated vendored Normal file
@@ -0,0 +1,240 @@
// Copyright 2018 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package endpoint resolves etcd entpoints using grpc targets of the form 'endpoint://<id>/<endpoint>'.
package endpoint

import (
"fmt"
"net/url"
"strings"
"sync"

"google.golang.org/grpc/resolver"
)

const scheme = "endpoint"

var (
targetPrefix = fmt.Sprintf("%s://", scheme)

bldr *builder
)

func init() {
bldr = &builder{
resolverGroups: make(map[string]*ResolverGroup),
}
resolver.Register(bldr)
}

type builder struct {
mu sync.RWMutex
resolverGroups map[string]*ResolverGroup
}

// NewResolverGroup creates a new ResolverGroup with the given id.
func NewResolverGroup(id string) (*ResolverGroup, error) {
return bldr.newResolverGroup(id)
}

// ResolverGroup keeps all endpoints of resolvers using a common endpoint://<id>/ target
// up-to-date.
type ResolverGroup struct {
mu sync.RWMutex
id string
endpoints []string
resolvers []*Resolver
}

func (e *ResolverGroup) addResolver(r *Resolver) {
e.mu.Lock()
addrs := epsToAddrs(e.endpoints...)
e.resolvers = append(e.resolvers, r)
e.mu.Unlock()
r.cc.NewAddress(addrs)
}

func (e *ResolverGroup) removeResolver(r *Resolver) {
e.mu.Lock()
for i, er := range e.resolvers {
if er == r {
e.resolvers = append(e.resolvers[:i], e.resolvers[i+1:]...)
break
}
}
e.mu.Unlock()
}

// SetEndpoints updates the endpoints for ResolverGroup. All registered resolver are updated
// immediately with the new endpoints.
func (e *ResolverGroup) SetEndpoints(endpoints []string) {
addrs := epsToAddrs(endpoints...)
e.mu.Lock()
e.endpoints = endpoints
for _, r := range e.resolvers {
r.cc.NewAddress(addrs)
}
e.mu.Unlock()
}

// Target constructs a endpoint target using the endpoint id of the ResolverGroup.
func (e *ResolverGroup) Target(endpoint string) string {
return Target(e.id, endpoint)
}

// Target constructs a endpoint resolver target.
func Target(id, endpoint string) string {
return fmt.Sprintf("%s://%s/%s", scheme, id, endpoint)
}

// IsTarget checks if a given target string in an endpoint resolver target.
func IsTarget(target string) bool {
return strings.HasPrefix(target, "endpoint://")
}

func (e *ResolverGroup) Close() {
bldr.close(e.id)
}

// Build creates or reuses an etcd resolver for the etcd cluster name identified by the authority part of the target.
func (b *builder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOption) (resolver.Resolver, error) {
if len(target.Authority) < 1 {
return nil, fmt.Errorf("'etcd' target scheme requires non-empty authority identifying etcd cluster being routed to")
}
id := target.Authority
es, err := b.getResolverGroup(id)
if err != nil {
return nil, fmt.Errorf("failed to build resolver: %v", err)
}
r := &Resolver{
endpointID: id,
cc: cc,
}
es.addResolver(r)
return r, nil
}

func (b *builder) newResolverGroup(id string) (*ResolverGroup, error) {
b.mu.RLock()
_, ok := b.resolverGroups[id]
b.mu.RUnlock()
if ok {
return nil, fmt.Errorf("Endpoint already exists for id: %s", id)
}

es := &ResolverGroup{id: id}
b.mu.Lock()
b.resolverGroups[id] = es
b.mu.Unlock()
return es, nil
}

func (b *builder) getResolverGroup(id string) (*ResolverGroup, error) {
b.mu.RLock()
es, ok := b.resolverGroups[id]
b.mu.RUnlock()
if !ok {
return nil, fmt.Errorf("ResolverGroup not found for id: %s", id)
}
return es, nil
}

func (b *builder) close(id string) {
b.mu.Lock()
delete(b.resolverGroups, id)
b.mu.Unlock()
}

func (b *builder) Scheme() string {
return scheme
}

// Resolver provides a resolver for a single etcd cluster, identified by name.
type Resolver struct {
endpointID string
cc resolver.ClientConn
sync.RWMutex
}

// TODO: use balancer.epsToAddrs
func epsToAddrs(eps ...string) (addrs []resolver.Address) {
addrs = make([]resolver.Address, 0, len(eps))
for _, ep := range eps {
addrs = append(addrs, resolver.Address{Addr: ep})
}
return addrs
}

func (*Resolver) ResolveNow(o resolver.ResolveNowOption) {}

func (r *Resolver) Close() {
es, err := bldr.getResolverGroup(r.endpointID)
if err != nil {
return
}
es.removeResolver(r)
}

// ParseEndpoint endpoint parses an endpoint of the form
// (http|https)://<host>*|(unix|unixs)://<path>)
// and returns a protocol ('tcp' or 'unix'),
// host (or filepath if a unix socket),
// scheme (http, https, unix, unixs).
func ParseEndpoint(endpoint string) (proto string, host string, scheme string) {
proto = "tcp"
host = endpoint
url, uerr := url.Parse(endpoint)
if uerr != nil || !strings.Contains(endpoint, "://") {
return proto, host, scheme
}
scheme = url.Scheme

// strip scheme:// prefix since grpc dials by host
host = url.Host
switch url.Scheme {
case "http", "https":
case "unix", "unixs":
proto = "unix"
host = url.Host + url.Path
default:
proto, host = "", ""
}
return proto, host, scheme
}

// ParseTarget parses a endpoint://<id>/<endpoint> string and returns the parsed id and endpoint.
// If the target is malformed, an error is returned.
func ParseTarget(target string) (string, string, error) {
noPrefix := strings.TrimPrefix(target, targetPrefix)
if noPrefix == target {
return "", "", fmt.Errorf("malformed target, %s prefix is required: %s", targetPrefix, target)
}
parts := strings.SplitN(noPrefix, "/", 2)
if len(parts) != 2 {
return "", "", fmt.Errorf("malformed target, expected %s://<id>/<endpoint>, but got %s", scheme, target)
}
return parts[0], parts[1], nil
}

// ParseHostPort splits a "<host>:<port>" string into the host and port parts.
// The port part is optional.
func ParseHostPort(hostPort string) (host string, port string) {
parts := strings.SplitN(hostPort, ":", 2)
host = parts[0]
if len(parts) > 1 {
port = parts[1]
}
return host, port
}
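The helpers above define the `endpoint://<id>/<endpoint>` target scheme. A short sketch of the round trip through Target, ParseTarget, and ParseEndpoint as they are written in this file; the cluster id and address are made up for illustration:

```go
package main

import (
	"fmt"

	"github.com/coreos/etcd/clientv3/balancer/resolver/endpoint"
)

func main() {
	// Build a resolver target for a cluster id and one endpoint.
	target := endpoint.Target("cluster-1", "https://10.0.0.1:2379")
	fmt.Println(target) // endpoint://cluster-1/https://10.0.0.1:2379

	// Split it back into id and endpoint.
	id, ep, err := endpoint.ParseTarget(target)
	fmt.Println(id, ep, err) // cluster-1 https://10.0.0.1:2379 <nil>

	// Classify the endpoint itself into protocol, host, and scheme.
	proto, host, scheme := endpoint.ParseEndpoint(ep)
	fmt.Println(proto, host, scheme) // tcp 10.0.0.1:2379 https
}
```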
68 vendor/github.com/coreos/etcd/clientv3/balancer/utils.go generated vendored Normal file
@@ -0,0 +1,68 @@
// Copyright 2018 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package balancer

import (
"fmt"
"net/url"
"sort"
"sync/atomic"
"time"

"google.golang.org/grpc/balancer"
"google.golang.org/grpc/resolver"
)

func scToString(sc balancer.SubConn) string {
return fmt.Sprintf("%p", sc)
}

func scsToStrings(scs map[balancer.SubConn]resolver.Address) (ss []string) {
ss = make([]string, 0, len(scs))
for sc, a := range scs {
ss = append(ss, fmt.Sprintf("%s (%s)", a.Addr, scToString(sc)))
}
sort.Strings(ss)
return ss
}

func addrsToStrings(addrs []resolver.Address) (ss []string) {
ss = make([]string, len(addrs))
for i := range addrs {
ss[i] = addrs[i].Addr
}
sort.Strings(ss)
return ss
}

func epsToAddrs(eps ...string) (addrs []resolver.Address) {
addrs = make([]resolver.Address, 0, len(eps))
for _, ep := range eps {
u, err := url.Parse(ep)
if err != nil {
addrs = append(addrs, resolver.Address{Addr: ep, Type: resolver.Backend})
continue
}
addrs = append(addrs, resolver.Address{Addr: u.Host, Type: resolver.Backend})
}
return addrs
}

var genN = new(uint32)

func genName() string {
now := time.Now().UnixNano()
return fmt.Sprintf("%X%X", now, atomic.AddUint32(genN, 1))
}
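epsToAddrs above strips the URL scheme before handing addresses to the gRPC resolver, falling back to the raw string when the endpoint does not parse as a URL. A quick sketch of the same net/url behavior it relies on (the two sample endpoints are illustrative):

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	for _, ep := range []string{"http://10.0.0.1:2379", "10.0.0.2:2379"} {
		if u, err := url.Parse(ep); err == nil {
			fmt.Println(u.Host) // URL-shaped endpoint: scheme stripped -> 10.0.0.1:2379
		} else {
			fmt.Println(ep) // bare host:port fails url.Parse and is used verbatim
		}
	}
}
```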
449
vendor/github.com/coreos/etcd/clientv3/client.go
generated
vendored
449
vendor/github.com/coreos/etcd/clientv3/client.go
generated
vendored
@@ -16,21 +16,26 @@ package clientv3
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"crypto/tls"
|
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"net"
|
"net"
|
||||||
"net/url"
|
"os"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/google/uuid"
|
||||||
|
"github.com/coreos/etcd/clientv3/balancer"
|
||||||
|
"github.com/coreos/etcd/clientv3/balancer/picker"
|
||||||
|
"github.com/coreos/etcd/clientv3/balancer/resolver/endpoint"
|
||||||
|
"github.com/coreos/etcd/clientv3/credentials"
|
||||||
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
|
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
|
||||||
|
"github.com/coreos/etcd/pkg/logutil"
|
||||||
|
"go.uber.org/zap"
|
||||||
"google.golang.org/grpc"
|
"google.golang.org/grpc"
|
||||||
"google.golang.org/grpc/codes"
|
"google.golang.org/grpc/codes"
|
||||||
"google.golang.org/grpc/credentials"
|
grpccredentials "google.golang.org/grpc/credentials"
|
||||||
"google.golang.org/grpc/keepalive"
|
"google.golang.org/grpc/keepalive"
|
||||||
"google.golang.org/grpc/metadata"
|
"google.golang.org/grpc/metadata"
|
||||||
"google.golang.org/grpc/status"
|
"google.golang.org/grpc/status"
|
||||||
@@ -39,8 +44,31 @@ import (
|
|||||||
var (
|
var (
|
||||||
ErrNoAvailableEndpoints = errors.New("etcdclient: no available endpoints")
|
ErrNoAvailableEndpoints = errors.New("etcdclient: no available endpoints")
|
||||||
ErrOldCluster = errors.New("etcdclient: old cluster version")
|
ErrOldCluster = errors.New("etcdclient: old cluster version")
|
||||||
|
|
||||||
|
roundRobinBalancerName = fmt.Sprintf("etcd-%s", picker.RoundrobinBalanced.String())
|
||||||
)
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
lg := zap.NewNop()
|
||||||
|
if os.Getenv("ETCD_CLIENT_DEBUG") != "" {
|
||||||
|
lcfg := logutil.DefaultZapLoggerConfig
|
||||||
|
lcfg.Level = zap.NewAtomicLevelAt(zap.DebugLevel)
|
||||||
|
|
||||||
|
var err error
|
||||||
|
lg, err = lcfg.Build() // info level logging
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO: support custom balancer
|
||||||
|
balancer.RegisterBuilder(balancer.Config{
|
||||||
|
Policy: picker.RoundrobinBalanced,
|
||||||
|
Name: roundRobinBalancerName,
|
||||||
|
Logger: lg,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
// Client provides and manages an etcd v3 client session.
|
// Client provides and manages an etcd v3 client session.
|
||||||
type Client struct {
|
type Client struct {
|
||||||
Cluster
|
Cluster
|
||||||
@@ -50,13 +78,12 @@ type Client struct {
     Auth
     Maintenance
 
     conn *grpc.ClientConn
-    dialerrc chan error
 
     cfg      Config
-    creds    *credentials.TransportCredentials
-    balancer *healthBalancer
+    creds    grpccredentials.TransportCredentials
+    resolverGroup *endpoint.ResolverGroup
     mu       *sync.RWMutex
 
     ctx    context.Context
     cancel context.CancelFunc
@@ -64,11 +91,12 @@ type Client struct {
     // Username is a user name for authentication.
     Username string
     // Password is a password for authentication.
     Password string
-    // tokenCred is an instance of WithPerRPCCredentials()'s argument
-    tokenCred *authTokenCredential
+    authTokenBundle credentials.Bundle
 
     callOpts []grpc.CallOption
 
+    lg *zap.Logger
 }
 
 // New creates a new etcdv3 client from a given configuration.
@@ -93,11 +121,19 @@ func NewFromURL(url string) (*Client, error) {
     return New(Config{Endpoints: []string{url}})
 }
 
+// NewFromURLs creates a new etcdv3 client from URLs.
+func NewFromURLs(urls []string) (*Client, error) {
+    return New(Config{Endpoints: urls})
+}
+
 // Close shuts down the client's etcd connections.
 func (c *Client) Close() error {
     c.cancel()
     c.Watcher.Close()
     c.Lease.Close()
+    if c.resolverGroup != nil {
+        c.resolverGroup.Close()
+    }
     if c.conn != nil {
         return toErr(c.ctx, c.conn.Close())
     }
@@ -111,9 +147,9 @@ func (c *Client) Ctx() context.Context { return c.ctx }
 
 // Endpoints lists the registered endpoints for the client.
 func (c *Client) Endpoints() []string {
+    // copy the slice; protect original endpoints from being changed
     c.mu.RLock()
     defer c.mu.RUnlock()
-    // copy the slice; protect original endpoints from being changed
     eps := make([]string, len(c.cfg.Endpoints))
     copy(eps, c.cfg.Endpoints)
     return eps
@@ -122,22 +158,9 @@ func (c *Client) Endpoints() []string {
 // SetEndpoints updates client's endpoints.
 func (c *Client) SetEndpoints(eps ...string) {
     c.mu.Lock()
+    defer c.mu.Unlock()
     c.cfg.Endpoints = eps
-    c.mu.Unlock()
-    c.balancer.updateAddrs(eps...)
-
-    // updating notifyCh can trigger new connections,
-    // need update addrs if all connections are down
-    // or addrs does not include pinAddr.
-    c.balancer.mu.RLock()
-    update := !hasAddr(c.balancer.addrs, c.balancer.pinAddr)
-    c.balancer.mu.RUnlock()
-    if update {
-        select {
-        case c.balancer.updateAddrsC <- notifyNext:
-        case <-c.balancer.stopc:
-        }
-    }
+    c.resolverGroup.SetEndpoints(eps)
 }
 
 // Sync synchronizes client's endpoints with the known endpoints from the etcd membership.
@@ -168,52 +191,13 @@ func (c *Client) autoSync() {
             err := c.Sync(ctx)
             cancel()
             if err != nil && err != c.ctx.Err() {
-                logger.Println("Auto sync endpoints failed:", err)
+                lg.Lvl(4).Infof("Auto sync endpoints failed: %v", err)
             }
         }
     }
 }
 
-type authTokenCredential struct {
-    token   string
-    tokenMu *sync.RWMutex
-}
-
-func (cred authTokenCredential) RequireTransportSecurity() bool {
-    return false
-}
-
-func (cred authTokenCredential) GetRequestMetadata(ctx context.Context, s ...string) (map[string]string, error) {
-    cred.tokenMu.RLock()
-    defer cred.tokenMu.RUnlock()
-    return map[string]string{
-        "token": cred.token,
-    }, nil
-}
-
-func parseEndpoint(endpoint string) (proto string, host string, scheme string) {
-    proto = "tcp"
-    host = endpoint
-    url, uerr := url.Parse(endpoint)
-    if uerr != nil || !strings.Contains(endpoint, "://") {
-        return proto, host, scheme
-    }
-    scheme = url.Scheme
-
-    // strip scheme:// prefix since grpc dials by host
-    host = url.Host
-    switch url.Scheme {
-    case "http", "https":
-    case "unix", "unixs":
-        proto = "unix"
-        host = url.Host + url.Path
-    default:
-        proto, host = "", ""
-    }
-    return proto, host, scheme
-}
-
-func (c *Client) processCreds(scheme string) (creds *credentials.TransportCredentials) {
+func (c *Client) processCreds(scheme string) (creds grpccredentials.TransportCredentials) {
     creds = c.creds
     switch scheme {
     case "unix":
@@ -223,83 +207,87 @@ func (c *Client) processCreds(scheme string) (creds *credentials.TransportCreden
         if creds != nil {
             break
         }
-        tlsconfig := &tls.Config{}
-        emptyCreds := credentials.NewTLS(tlsconfig)
-        creds = &emptyCreds
+        creds = credentials.NewBundle(credentials.Config{}).TransportCredentials()
     default:
         creds = nil
     }
     return creds
 }
 
-// dialSetupOpts gives the dial opts prior to any authentication
-func (c *Client) dialSetupOpts(endpoint string, dopts ...grpc.DialOption) (opts []grpc.DialOption) {
-    if c.cfg.DialTimeout > 0 {
-        opts = []grpc.DialOption{grpc.WithTimeout(c.cfg.DialTimeout)}
-    }
+// dialSetupOpts gives the dial opts prior to any authentication.
+func (c *Client) dialSetupOpts(creds grpccredentials.TransportCredentials, dopts ...grpc.DialOption) (opts []grpc.DialOption, err error) {
     if c.cfg.DialKeepAliveTime > 0 {
         params := keepalive.ClientParameters{
             Time:    c.cfg.DialKeepAliveTime,
             Timeout: c.cfg.DialKeepAliveTimeout,
+            PermitWithoutStream: c.cfg.PermitWithoutStream,
         }
         opts = append(opts, grpc.WithKeepaliveParams(params))
     }
     opts = append(opts, dopts...)
 
-    f := func(host string, t time.Duration) (net.Conn, error) {
-        proto, host, _ := parseEndpoint(c.balancer.endpoint(host))
-        if host == "" && endpoint != "" {
-            // dialing an endpoint not in the balancer; use
-            // endpoint passed into dial
-            proto, host, _ = parseEndpoint(endpoint)
-        }
-        if proto == "" {
-            return nil, fmt.Errorf("unknown scheme for %q", host)
-        }
+    // Provide a net dialer that supports cancelation and timeout.
+    f := func(dialEp string, t time.Duration) (net.Conn, error) {
+        proto, host, _ := endpoint.ParseEndpoint(dialEp)
         select {
         case <-c.ctx.Done():
             return nil, c.ctx.Err()
         default:
         }
         dialer := &net.Dialer{Timeout: t}
-        conn, err := dialer.DialContext(c.ctx, proto, host)
-        if err != nil {
-            select {
-            case c.dialerrc <- err:
-            default:
-            }
-        }
-        return conn, err
+        return dialer.DialContext(c.ctx, proto, host)
     }
     opts = append(opts, grpc.WithDialer(f))
 
-    creds := c.creds
-    if _, _, scheme := parseEndpoint(endpoint); len(scheme) != 0 {
-        creds = c.processCreds(scheme)
-    }
     if creds != nil {
-        opts = append(opts, grpc.WithTransportCredentials(*creds))
+        opts = append(opts, grpc.WithTransportCredentials(creds))
     } else {
         opts = append(opts, grpc.WithInsecure())
     }
 
-    return opts
+    // Interceptor retry and backoff.
+    // TODO: Replace all of clientv3/retry.go with interceptor based retry, or with
+    // https://github.com/grpc/proposal/blob/master/A6-client-retries.md#retry-policy
+    // once it is available.
+    rrBackoff := withBackoff(c.roundRobinQuorumBackoff(defaultBackoffWaitBetween, defaultBackoffJitterFraction))
+    opts = append(opts,
+        // Disable stream retry by default since go-grpc-middleware/retry does not support client streams.
+        // Streams that are safe to retry are enabled individually.
+        grpc.WithStreamInterceptor(c.streamClientInterceptor(c.lg, withMax(0), rrBackoff)),
+        grpc.WithUnaryInterceptor(c.unaryClientInterceptor(c.lg, withMax(defaultUnaryMaxRetries), rrBackoff)),
+    )
+
+    return opts, nil
 }
 
 // Dial connects to a single endpoint using the client's config.
-func (c *Client) Dial(endpoint string) (*grpc.ClientConn, error) {
-    return c.dial(endpoint)
+func (c *Client) Dial(ep string) (*grpc.ClientConn, error) {
+    creds := c.directDialCreds(ep)
+    // Use the grpc passthrough resolver to directly dial a single endpoint.
+    // This resolver passes through the 'unix' and 'unixs' endpoints schemes used
+    // by etcd without modification, allowing us to directly dial endpoints and
+    // using the same dial functions that we use for load balancer dialing.
+    return c.dial(fmt.Sprintf("passthrough:///%s", ep), creds)
 }
 
 func (c *Client) getToken(ctx context.Context) error {
     var err error // return last error in a case of fail
     var auth *authenticator
 
-    for i := 0; i < len(c.cfg.Endpoints); i++ {
-        endpoint := c.cfg.Endpoints[i]
-        host := getHost(endpoint)
+    eps := c.Endpoints()
+    for _, ep := range eps {
         // use dial options without dopts to avoid reusing the client balancer
-        auth, err = newAuthenticator(host, c.dialSetupOpts(endpoint), c)
+        var dOpts []grpc.DialOption
+        _, host, _ := endpoint.ParseEndpoint(ep)
+        target := c.resolverGroup.Target(host)
+        creds := c.dialWithBalancerCreds(ep)
+        dOpts, err = c.dialSetupOpts(creds, c.cfg.DialOptions...)
+        if err != nil {
+            err = fmt.Errorf("failed to configure auth dialer: %v", err)
+            continue
+        }
+        dOpts = append(dOpts, grpc.WithBalancerName(roundRobinBalancerName))
+        auth, err = newAuthenticator(ctx, target, dOpts, c)
         if err != nil {
             continue
         }
@@ -308,56 +296,102 @@ func (c *Client) getToken(ctx context.Context) error {
         var resp *AuthenticateResponse
         resp, err = auth.authenticate(ctx, c.Username, c.Password)
         if err != nil {
+            // return err without retrying other endpoints
+            if err == rpctypes.ErrAuthNotEnabled {
+                return err
+            }
             continue
         }
 
-        c.tokenCred.tokenMu.Lock()
-        c.tokenCred.token = resp.Token
-        c.tokenCred.tokenMu.Unlock()
+        c.authTokenBundle.UpdateAuthToken(resp.Token)
 
         return nil
     }
 
    return err
 }
 
-func (c *Client) dial(endpoint string, dopts ...grpc.DialOption) (*grpc.ClientConn, error) {
-    opts := c.dialSetupOpts(endpoint, dopts...)
-    host := getHost(endpoint)
+// dialWithBalancer dials the client's current load balanced resolver group. The scheme of the host
+// of the provided endpoint determines the scheme used for all endpoints of the client connection.
+func (c *Client) dialWithBalancer(ep string, dopts ...grpc.DialOption) (*grpc.ClientConn, error) {
+    _, host, _ := endpoint.ParseEndpoint(ep)
+    target := c.resolverGroup.Target(host)
+    creds := c.dialWithBalancerCreds(ep)
+    return c.dial(target, creds, dopts...)
+}
+
+// dial configures and dials any grpc balancer target.
+func (c *Client) dial(target string, creds grpccredentials.TransportCredentials, dopts ...grpc.DialOption) (*grpc.ClientConn, error) {
+    opts, err := c.dialSetupOpts(creds, dopts...)
+    if err != nil {
+        return nil, fmt.Errorf("failed to configure dialer: %v", err)
+    }
+
     if c.Username != "" && c.Password != "" {
-        c.tokenCred = &authTokenCredential{
-            tokenMu: &sync.RWMutex{},
-        }
+        c.authTokenBundle = credentials.NewBundle(credentials.Config{})
 
-        ctx := c.ctx
+        ctx, cancel := c.ctx, func() {}
         if c.cfg.DialTimeout > 0 {
-            cctx, cancel := context.WithTimeout(ctx, c.cfg.DialTimeout)
-            defer cancel()
-            ctx = cctx
+            ctx, cancel = context.WithTimeout(ctx, c.cfg.DialTimeout)
        }
 
-        err := c.getToken(ctx)
+        err = c.getToken(ctx)
         if err != nil {
            if toErr(ctx, err) != rpctypes.ErrAuthNotEnabled {
                if err == ctx.Err() && ctx.Err() != c.ctx.Err() {
                    err = context.DeadlineExceeded
                }
+                cancel()
                return nil, err
            }
        } else {
-            opts = append(opts, grpc.WithPerRPCCredentials(c.tokenCred))
+            opts = append(opts, grpc.WithPerRPCCredentials(c.authTokenBundle.PerRPCCredentials()))
        }
+        cancel()
    }
 
    opts = append(opts, c.cfg.DialOptions...)
 
-    conn, err := grpc.DialContext(c.ctx, host, opts...)
+    dctx := c.ctx
+    if c.cfg.DialTimeout > 0 {
+        var cancel context.CancelFunc
+        dctx, cancel = context.WithTimeout(c.ctx, c.cfg.DialTimeout)
+        defer cancel() // TODO: Is this right for cases where grpc.WithBlock() is not set on the dial options?
+    }
+
+    conn, err := grpc.DialContext(dctx, target, opts...)
    if err != nil {
        return nil, err
    }
    return conn, nil
 }
 
+func (c *Client) directDialCreds(ep string) grpccredentials.TransportCredentials {
+    _, hostPort, scheme := endpoint.ParseEndpoint(ep)
+    creds := c.creds
+    if len(scheme) != 0 {
+        creds = c.processCreds(scheme)
+        if creds != nil {
+            clone := creds.Clone()
+            // Set the server name must to the endpoint hostname without port since grpc
+            // otherwise attempts to check if x509 cert is valid for the full endpoint
+            // including the scheme and port, which fails.
+            host, _ := endpoint.ParseHostPort(hostPort)
+            clone.OverrideServerName(host)
+            creds = clone
+        }
+    }
+    return creds
+}
+
+func (c *Client) dialWithBalancerCreds(ep string) grpccredentials.TransportCredentials {
+    _, _, scheme := endpoint.ParseEndpoint(ep)
+    creds := c.creds
+    if len(scheme) != 0 {
+        creds = c.processCreds(scheme)
+    }
+    return creds
+}
+
 // WithRequireLeader requires client requests to only succeed
 // when the cluster has a leader.
 func WithRequireLeader(ctx context.Context) context.Context {
@@ -369,10 +403,9 @@ func newClient(cfg *Config) (*Client, error) {
     if cfg == nil {
         cfg = &Config{}
     }
-    var creds *credentials.TransportCredentials
+    var creds grpccredentials.TransportCredentials
     if cfg.TLS != nil {
-        c := credentials.NewTLS(cfg.TLS)
-        creds = &c
+        creds = credentials.NewBundle(credentials.Config{TLSConfig: cfg.TLS}).TransportCredentials()
     }
 
     // use a temporary skeleton client to bootstrap first connection
@@ -384,7 +417,6 @@ func newClient(cfg *Config) (*Client, error) {
     ctx, cancel := context.WithCancel(baseCtx)
     client := &Client{
         conn:     nil,
-        dialerrc: make(chan error, 1),
         cfg:      *cfg,
         creds:    creds,
         ctx:      ctx,
@@ -392,6 +424,17 @@ func newClient(cfg *Config) (*Client, error) {
         mu:       new(sync.RWMutex),
         callOpts: defaultCallOpts,
     }
+
+    lcfg := logutil.DefaultZapLoggerConfig
+    if cfg.LogConfig != nil {
+        lcfg = *cfg.LogConfig
+    }
+    var err error
+    client.lg, err = lcfg.Build()
+    if err != nil {
+        return nil, err
+    }
+
     if cfg.Username != "" && cfg.Password != "" {
         client.Username = cfg.Username
         client.Password = cfg.Password
@@ -414,42 +457,31 @@ func newClient(cfg *Config) (*Client, error) {
         client.callOpts = callOpts
     }
 
-    client.balancer = newHealthBalancer(cfg.Endpoints, cfg.DialTimeout, func(ep string) (bool, error) {
-        return grpcHealthCheck(client, ep)
-    })
-
-    // use Endpoints[0] so that for https:// without any tls config given, then
-    // grpc will assume the certificate server name is the endpoint host.
-    conn, err := client.dial(cfg.Endpoints[0], grpc.WithBalancer(client.balancer))
+    // Prepare a 'endpoint://<unique-client-id>/' resolver for the client and create a endpoint target to pass
+    // to dial so the client knows to use this resolver.
+    client.resolverGroup, err = endpoint.NewResolverGroup(fmt.Sprintf("client-%s", uuid.New().String()))
     if err != nil {
         client.cancel()
-        client.balancer.Close()
         return nil, err
     }
-    client.conn = conn
+    client.resolverGroup.SetEndpoints(cfg.Endpoints)
 
-    // wait for a connection
-    if cfg.DialTimeout > 0 {
-        hasConn := false
-        waitc := time.After(cfg.DialTimeout)
-        select {
-        case <-client.balancer.ready():
-            hasConn = true
-        case <-ctx.Done():
-        case <-waitc:
-        }
-        if !hasConn {
-            err := context.DeadlineExceeded
-            select {
-            case err = <-client.dialerrc:
-            default:
-            }
-            client.cancel()
-            client.balancer.Close()
-            conn.Close()
-            return nil, err
-        }
+    if len(cfg.Endpoints) < 1 {
+        return nil, fmt.Errorf("at least one Endpoint must is required in client config")
     }
+    dialEndpoint := cfg.Endpoints[0]
+
+    // Use a provided endpoint target so that for https:// without any tls config given, then
+    // grpc will assume the certificate server name is the endpoint host.
+    conn, err := client.dialWithBalancer(dialEndpoint, grpc.WithBalancerName(roundRobinBalancerName))
+    if err != nil {
+        client.cancel()
+        client.resolverGroup.Close()
+        return nil, err
+    }
+    // TODO: With the old grpc balancer interface, we waited until the dial timeout
+    // for the balancer to be ready. Is there an equivalent wait we should do with the new grpc balancer interface?
+    client.conn = conn
 
     client.Cluster = NewCluster(client)
     client.KV = NewKV(client)
@@ -469,15 +501,35 @@ func newClient(cfg *Config) (*Client, error) {
     return client, nil
 }
 
+// roundRobinQuorumBackoff retries against quorum between each backoff.
+// This is intended for use with a round robin load balancer.
+func (c *Client) roundRobinQuorumBackoff(waitBetween time.Duration, jitterFraction float64) backoffFunc {
+    return func(attempt uint) time.Duration {
+        // after each round robin across quorum, backoff for our wait between duration
+        n := uint(len(c.Endpoints()))
+        quorum := (n/2 + 1)
+        if attempt%quorum == 0 {
+            c.lg.Debug("backoff", zap.Uint("attempt", attempt), zap.Uint("quorum", quorum), zap.Duration("waitBetween", waitBetween), zap.Float64("jitterFraction", jitterFraction))
+            return jitterUp(waitBetween, jitterFraction)
+        }
+        c.lg.Debug("backoff skipped", zap.Uint("attempt", attempt), zap.Uint("quorum", quorum))
+        return 0
+    }
+}
+
 func (c *Client) checkVersion() (err error) {
     var wg sync.WaitGroup
-    errc := make(chan error, len(c.cfg.Endpoints))
+    eps := c.Endpoints()
+    errc := make(chan error, len(eps))
     ctx, cancel := context.WithCancel(c.ctx)
     if c.cfg.DialTimeout > 0 {
-        ctx, cancel = context.WithTimeout(ctx, c.cfg.DialTimeout)
+        cancel()
+        ctx, cancel = context.WithTimeout(c.ctx, c.cfg.DialTimeout)
     }
-    wg.Add(len(c.cfg.Endpoints))
-    for _, ep := range c.cfg.Endpoints {
+    wg.Add(len(eps))
+    for _, ep := range eps {
         // if cluster is current, any endpoint gives a recent version
         go func(e string) {
             defer wg.Done()
@@ -489,8 +541,15 @@ func (c *Client) checkVersion() (err error) {
             vs := strings.Split(resp.Version, ".")
             maj, min := 0, 0
             if len(vs) >= 2 {
-                maj, _ = strconv.Atoi(vs[0])
-                min, rerr = strconv.Atoi(vs[1])
+                var serr error
+                if maj, serr = strconv.Atoi(vs[0]); serr != nil {
+                    errc <- serr
+                    return
+                }
+                if min, serr = strconv.Atoi(vs[1]); serr != nil {
+                    errc <- serr
+                    return
+                }
             }
             if maj < 3 || (maj == 3 && min < 2) {
                 rerr = ErrOldCluster
@@ -499,7 +558,7 @@ func (c *Client) checkVersion() (err error) {
         }(ep)
     }
     // wait for success
-    for i := 0; i < len(c.cfg.Endpoints); i++ {
+    for range eps {
         if err = <-errc; err == nil {
             break
         }
@@ -539,10 +598,13 @@ func isUnavailableErr(ctx context.Context, err error) bool {
     if err == nil {
         return false
     }
-    ev, _ := status.FromError(err)
-    // Unavailable codes mean the system will be right back.
-    // (e.g., can't connect, lost leader)
-    return ev.Code() == codes.Unavailable
+    ev, ok := status.FromError(err)
+    if ok {
+        // Unavailable codes mean the system will be right back.
+        // (e.g., can't connect, lost leader)
+        return ev.Code() == codes.Unavailable
+    }
+    return false
 }
 
 func toErr(ctx context.Context, err error) error {
@@ -553,18 +615,16 @@ func toErr(ctx context.Context, err error) error {
     if _, ok := err.(rpctypes.EtcdError); ok {
         return err
     }
-    ev, _ := status.FromError(err)
+    if ev, ok := status.FromError(err); ok {
         code := ev.Code()
         switch code {
         case codes.DeadlineExceeded:
             fallthrough
         case codes.Canceled:
             if ctx.Err() != nil {
                 err = ctx.Err()
+            }
         }
-    case codes.Unavailable:
-    case codes.FailedPrecondition:
-        err = grpc.ErrClientConnClosing
     }
     return err
 }
@@ -576,3 +636,26 @@ func canceledByCaller(stopCtx context.Context, err error) bool {
 
     return err == context.Canceled || err == context.DeadlineExceeded
 }
+
+// IsConnCanceled returns true, if error is from a closed gRPC connection.
+// ref. https://github.com/grpc/grpc-go/pull/1854
+func IsConnCanceled(err error) bool {
+    if err == nil {
+        return false
+    }
+
+    // >= gRPC v1.23.x
+    s, ok := status.FromError(err)
+    if ok {
+        // connection is canceled or server has already closed the connection
+        return s.Code() == codes.Canceled || s.Message() == "transport is closing"
+    }
+
+    // >= gRPC v1.10.x
+    if err == context.Canceled {
+        return true
+    }
+
+    // <= gRPC v1.7.x returns 'errors.New("grpc: the client connection is closing")'
+    return strings.Contains(err.Error(), "grpc: the client connection is closing")
+}
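
Illustrative only, not part of the vendored diff above: a minimal sketch of how a caller might use the newly added clientv3.IsConnCanceled helper to tell a closed client connection apart from other RPC errors. The endpoint address and key are placeholders.

package main

import (
	"context"
	"log"

	"github.com/coreos/etcd/clientv3"
)

func main() {
	// Hypothetical local endpoint for illustration.
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	_, err = cli.Get(context.Background(), "foo")
	if clientv3.IsConnCanceled(err) {
		// The gRPC connection was closed (for example, the client was shut down);
		// retrying on this client would not help.
		log.Println("connection canceled:", err)
	}
}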
1 vendor/github.com/coreos/etcd/clientv3/concurrency/election.go generated vendored
@@ -49,6 +49,7 @@ func NewElection(s *Session, pfx string) *Election {
 func ResumeElection(s *Session, pfx string, leaderKey string, leaderRev int64) *Election {
     return &Election{
         session:       s,
+        keyPrefix:     pfx,
         leaderKey:     leaderKey,
         leaderRev:     leaderRev,
         leaderSession: s,
11 vendor/github.com/coreos/etcd/clientv3/config.go generated vendored
@@ -19,6 +19,7 @@ import (
     "crypto/tls"
     "time"
 
+    "go.uber.org/zap"
     "google.golang.org/grpc"
 )
 
@@ -67,9 +68,19 @@ type Config struct {
     RejectOldCluster bool `json:"reject-old-cluster"`
 
     // DialOptions is a list of dial options for the grpc client (e.g., for interceptors).
+    // For example, pass "grpc.WithBlock()" to block until the underlying connection is up.
+    // Without this, Dial returns immediately and connecting the server happens in background.
     DialOptions []grpc.DialOption
 
+    // LogConfig configures client-side logger.
+    // If nil, use the default logger.
+    // TODO: configure gRPC logger
+    LogConfig *zap.Config
+
     // Context is the default client context; it can be used to cancel grpc dial out and
     // other operations that do not have an explicit context.
     Context context.Context
+
+    // PermitWithoutStream when set will allow client to send keepalive pings to server without any active streams(RPCs).
+    PermitWithoutStream bool `json:"permit-without-stream"`
 }
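
Illustrative only, not part of the vendored diff: a minimal sketch of a clientv3.Config that exercises the documented DialOptions behavior and the new LogConfig and PermitWithoutStream fields added above. The endpoint and timeout values are placeholders.

package main

import (
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
	"go.uber.org/zap"
	"google.golang.org/grpc"
)

func main() {
	lcfg := zap.NewProductionConfig()
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"https://127.0.0.1:2379"}, // placeholder endpoint
		DialTimeout: 5 * time.Second,
		// Block until the underlying connection is up, per the comment added above.
		DialOptions:         []grpc.DialOption{grpc.WithBlock()},
		LogConfig:           &lcfg,
		PermitWithoutStream: true,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()
}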
27 vendor/github.com/coreos/etcd/clientv3/credentials/BUILD generated vendored Normal file
@@ -0,0 +1,27 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+    name = "go_default_library",
+    srcs = ["credentials.go"],
+    importmap = "k8s.io/kubernetes/vendor/github.com/coreos/etcd/clientv3/credentials",
+    importpath = "github.com/coreos/etcd/clientv3/credentials",
+    visibility = ["//visibility:public"],
+    deps = [
+        "//vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes:go_default_library",
+        "//vendor/google.golang.org/grpc/credentials:go_default_library",
+    ],
+)
+
+filegroup(
+    name = "package-srcs",
+    srcs = glob(["**"]),
+    tags = ["automanaged"],
+    visibility = ["//visibility:private"],
+)
+
+filegroup(
+    name = "all-srcs",
+    srcs = [":package-srcs"],
+    tags = ["automanaged"],
+    visibility = ["//visibility:public"],
+)
155 vendor/github.com/coreos/etcd/clientv3/credentials/credentials.go generated vendored Normal file
@@ -0,0 +1,155 @@
+// Copyright 2019 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package credentials implements gRPC credential interface with etcd specific logic.
+// e.g., client handshake with custom authority parameter
+package credentials
+
+import (
+    "context"
+    "crypto/tls"
+    "net"
+    "sync"
+
+    "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
+    grpccredentials "google.golang.org/grpc/credentials"
+)
+
+// Config defines gRPC credential configuration.
+type Config struct {
+    TLSConfig *tls.Config
+}
+
+// Bundle defines gRPC credential interface.
+type Bundle interface {
+    grpccredentials.Bundle
+    UpdateAuthToken(token string)
+}
+
+// NewBundle constructs a new gRPC credential bundle.
+func NewBundle(cfg Config) Bundle {
+    return &bundle{
+        tc: newTransportCredential(cfg.TLSConfig),
+        rc: newPerRPCCredential(),
+    }
+}
+
+// bundle implements "grpccredentials.Bundle" interface.
+type bundle struct {
+    tc *transportCredential
+    rc *perRPCCredential
+}
+
+func (b *bundle) TransportCredentials() grpccredentials.TransportCredentials {
+    return b.tc
+}
+
+func (b *bundle) PerRPCCredentials() grpccredentials.PerRPCCredentials {
+    return b.rc
+}
+
+func (b *bundle) NewWithMode(mode string) (grpccredentials.Bundle, error) {
+    // no-op
+    return nil, nil
+}
+
+// transportCredential implements "grpccredentials.TransportCredentials" interface.
+type transportCredential struct {
+    gtc grpccredentials.TransportCredentials
+}
+
+func newTransportCredential(cfg *tls.Config) *transportCredential {
+    return &transportCredential{
+        gtc: grpccredentials.NewTLS(cfg),
+    }
+}
+
+func (tc *transportCredential) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (net.Conn, grpccredentials.AuthInfo, error) {
+    // Only overwrite when authority is an IP address!
+    // Let's say, a server runs SRV records on "etcd.local" that resolves
+    // to "m1.etcd.local", and its SAN field also includes "m1.etcd.local".
+    // But what if SAN does not include its resolved IP address (e.g. 127.0.0.1)?
+    // Then, the server should only authenticate using its DNS hostname "m1.etcd.local",
+    // instead of overwriting it with its IP address.
+    // And we do not overwrite "localhost" either. Only overwrite IP addresses!
+    if isIP(authority) {
+        target := rawConn.RemoteAddr().String()
+        if authority != target {
+            // When user dials with "grpc.WithDialer", "grpc.DialContext" "cc.parsedTarget"
+            // update only happens once. This is problematic, because when TLS is enabled,
+            // retries happen through "grpc.WithDialer" with static "cc.parsedTarget" from
+            // the initial dial call.
+            // If the server authenticates by IP addresses, we want to set a new endpoint as
+            // a new authority. Otherwise
+            // "transport: authentication handshake failed: x509: certificate is valid for 127.0.0.1, 192.168.121.180, not 192.168.223.156"
+            // when the new dial target is "192.168.121.180" whose certificate host name is also "192.168.121.180"
+            // but client tries to authenticate with previously set "cc.parsedTarget" field "192.168.223.156"
+            authority = target
+        }
+    }
+    return tc.gtc.ClientHandshake(ctx, authority, rawConn)
+}
+
+// return true if given string is an IP.
+func isIP(ep string) bool {
+    return net.ParseIP(ep) != nil
+}
+
+func (tc *transportCredential) ServerHandshake(rawConn net.Conn) (net.Conn, grpccredentials.AuthInfo, error) {
+    return tc.gtc.ServerHandshake(rawConn)
+}
+
+func (tc *transportCredential) Info() grpccredentials.ProtocolInfo {
+    return tc.gtc.Info()
+}
+
+func (tc *transportCredential) Clone() grpccredentials.TransportCredentials {
+    return &transportCredential{
+        gtc: tc.gtc.Clone(),
+    }
+}
+
+func (tc *transportCredential) OverrideServerName(serverNameOverride string) error {
+    return tc.gtc.OverrideServerName(serverNameOverride)
+}
+
+// perRPCCredential implements "grpccredentials.PerRPCCredentials" interface.
+type perRPCCredential struct {
+    authToken   string
+    authTokenMu sync.RWMutex
+}
+
+func newPerRPCCredential() *perRPCCredential { return &perRPCCredential{} }
+
+func (rc *perRPCCredential) RequireTransportSecurity() bool { return false }
+
+func (rc *perRPCCredential) GetRequestMetadata(ctx context.Context, s ...string) (map[string]string, error) {
+    rc.authTokenMu.RLock()
+    authToken := rc.authToken
+    rc.authTokenMu.RUnlock()
+    return map[string]string{rpctypes.TokenFieldNameGRPC: authToken}, nil
+}
+
+func (b *bundle) UpdateAuthToken(token string) {
+    if b.rc == nil {
+        return
+    }
+    b.rc.UpdateAuthToken(token)
+}
+
+func (rc *perRPCCredential) UpdateAuthToken(token string) {
+    rc.authTokenMu.Lock()
+    rc.authToken = token
+    rc.authTokenMu.Unlock()
+}
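
Illustrative only, not part of the vendored diff: a minimal sketch of how the new credentials.Bundle is meant to be wired into a gRPC dial, mirroring what clientv3 does in client.go above. The dial target is a placeholder.

package main

import (
	"crypto/tls"
	"log"

	"github.com/coreos/etcd/clientv3/credentials"
	"google.golang.org/grpc"
)

func main() {
	// Bundle both transport credentials and the per-RPC auth token credential.
	bundle := credentials.NewBundle(credentials.Config{TLSConfig: &tls.Config{}})

	conn, err := grpc.Dial(
		"127.0.0.1:2379", // placeholder target
		grpc.WithTransportCredentials(bundle.TransportCredentials()),
		grpc.WithPerRPCCredentials(bundle.PerRPCCredentials()),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// After authenticating, the token can be rotated without re-dialing.
	bundle.UpdateAuthToken("example-token")
}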
609 vendor/github.com/coreos/etcd/clientv3/health_balancer.go generated vendored
@@ -1,609 +0,0 @@
// Copyright 2017 The etcd Authors
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package clientv3
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"errors"
|
|
||||||
"net/url"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"google.golang.org/grpc"
|
|
||||||
"google.golang.org/grpc/codes"
|
|
||||||
healthpb "google.golang.org/grpc/health/grpc_health_v1"
|
|
||||||
"google.golang.org/grpc/status"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
minHealthRetryDuration = 3 * time.Second
|
|
||||||
unknownService = "unknown service grpc.health.v1.Health"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ErrNoAddrAvilable is returned by Get() when the balancer does not have
|
|
||||||
// any active connection to endpoints at the time.
|
|
||||||
// This error is returned only when opts.BlockingWait is true.
|
|
||||||
var ErrNoAddrAvilable = status.Error(codes.Unavailable, "there is no address available")
|
|
||||||
|
|
||||||
type healthCheckFunc func(ep string) (bool, error)
|
|
||||||
|
|
||||||
type notifyMsg int
|
|
||||||
|
|
||||||
const (
|
|
||||||
notifyReset notifyMsg = iota
|
|
||||||
notifyNext
|
|
||||||
)
|
|
||||||
|
|
||||||
// healthBalancer does the bare minimum to expose multiple eps
|
|
||||||
// to the grpc reconnection code path
|
|
||||||
type healthBalancer struct {
|
|
||||||
// addrs are the client's endpoint addresses for grpc
|
|
||||||
addrs []grpc.Address
|
|
||||||
|
|
||||||
// eps holds the raw endpoints from the client
|
|
||||||
eps []string
|
|
||||||
|
|
||||||
// notifyCh notifies grpc of the set of addresses for connecting
|
|
||||||
notifyCh chan []grpc.Address
|
|
||||||
|
|
||||||
// readyc closes once the first connection is up
|
|
||||||
readyc chan struct{}
|
|
||||||
readyOnce sync.Once
|
|
||||||
|
|
||||||
// healthCheck checks an endpoint's health.
|
|
||||||
healthCheck healthCheckFunc
|
|
||||||
healthCheckTimeout time.Duration
|
|
||||||
|
|
||||||
unhealthyMu sync.RWMutex
|
|
||||||
unhealthyHostPorts map[string]time.Time
|
|
||||||
|
|
||||||
// mu protects all fields below.
|
|
||||||
mu sync.RWMutex
|
|
||||||
|
|
||||||
// upc closes when pinAddr transitions from empty to non-empty or the balancer closes.
|
|
||||||
upc chan struct{}
|
|
||||||
|
|
||||||
// downc closes when grpc calls down() on pinAddr
|
|
||||||
downc chan struct{}
|
|
||||||
|
|
||||||
// stopc is closed to signal updateNotifyLoop should stop.
|
|
||||||
stopc chan struct{}
|
|
||||||
stopOnce sync.Once
|
|
||||||
wg sync.WaitGroup
|
|
||||||
|
|
||||||
// donec closes when all goroutines are exited
|
|
||||||
donec chan struct{}
|
|
||||||
|
|
||||||
// updateAddrsC notifies updateNotifyLoop to update addrs.
|
|
||||||
updateAddrsC chan notifyMsg
|
|
||||||
|
|
||||||
// grpc issues TLS cert checks using the string passed into dial so
|
|
||||||
// that string must be the host. To recover the full scheme://host URL,
|
|
||||||
// have a map from hosts to the original endpoint.
|
|
||||||
hostPort2ep map[string]string
|
|
||||||
|
|
||||||
// pinAddr is the currently pinned address; set to the empty string on
|
|
||||||
// initialization and shutdown.
|
|
||||||
pinAddr string
|
|
||||||
|
|
||||||
closed bool
|
|
||||||
}
|
|
||||||
|
|
||||||
func newHealthBalancer(eps []string, timeout time.Duration, hc healthCheckFunc) *healthBalancer {
|
|
||||||
notifyCh := make(chan []grpc.Address)
|
|
||||||
addrs := eps2addrs(eps)
|
|
||||||
hb := &healthBalancer{
|
|
||||||
addrs: addrs,
|
|
||||||
eps: eps,
|
|
||||||
notifyCh: notifyCh,
|
|
||||||
readyc: make(chan struct{}),
|
|
||||||
healthCheck: hc,
|
|
||||||
unhealthyHostPorts: make(map[string]time.Time),
|
|
||||||
upc: make(chan struct{}),
|
|
||||||
stopc: make(chan struct{}),
|
|
||||||
downc: make(chan struct{}),
|
|
||||||
donec: make(chan struct{}),
|
|
||||||
updateAddrsC: make(chan notifyMsg),
|
|
||||||
hostPort2ep: getHostPort2ep(eps),
|
|
||||||
}
|
|
||||||
if timeout < minHealthRetryDuration {
|
|
||||||
timeout = minHealthRetryDuration
|
|
||||||
}
|
|
||||||
hb.healthCheckTimeout = timeout
|
|
||||||
|
|
||||||
close(hb.downc)
|
|
||||||
go hb.updateNotifyLoop()
|
|
||||||
hb.wg.Add(1)
|
|
||||||
go func() {
|
|
||||||
defer hb.wg.Done()
|
|
||||||
hb.updateUnhealthy()
|
|
||||||
}()
|
|
||||||
return hb
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *healthBalancer) Start(target string, config grpc.BalancerConfig) error { return nil }
|
|
||||||
|
|
||||||
func (b *healthBalancer) ConnectNotify() <-chan struct{} {
|
|
||||||
b.mu.Lock()
|
|
||||||
defer b.mu.Unlock()
|
|
||||||
return b.upc
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *healthBalancer) ready() <-chan struct{} { return b.readyc }
|
|
||||||
|
|
||||||
func (b *healthBalancer) endpoint(hostPort string) string {
|
|
||||||
b.mu.RLock()
|
|
||||||
defer b.mu.RUnlock()
|
|
||||||
return b.hostPort2ep[hostPort]
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *healthBalancer) pinned() string {
|
|
||||||
b.mu.RLock()
|
|
||||||
defer b.mu.RUnlock()
|
|
||||||
return b.pinAddr
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *healthBalancer) hostPortError(hostPort string, err error) {
|
|
||||||
if b.endpoint(hostPort) == "" {
|
|
||||||
logger.Lvl(4).Infof("clientv3/balancer: %q is stale (skip marking as unhealthy on %q)", hostPort, err.Error())
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
b.unhealthyMu.Lock()
|
|
||||||
b.unhealthyHostPorts[hostPort] = time.Now()
|
|
||||||
b.unhealthyMu.Unlock()
|
|
||||||
logger.Lvl(4).Infof("clientv3/balancer: %q is marked unhealthy (%q)", hostPort, err.Error())
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *healthBalancer) removeUnhealthy(hostPort, msg string) {
|
|
||||||
if b.endpoint(hostPort) == "" {
|
|
||||||
logger.Lvl(4).Infof("clientv3/balancer: %q was not in unhealthy (%q)", hostPort, msg)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
b.unhealthyMu.Lock()
|
|
||||||
delete(b.unhealthyHostPorts, hostPort)
|
|
||||||
b.unhealthyMu.Unlock()
|
|
||||||
logger.Lvl(4).Infof("clientv3/balancer: %q is removed from unhealthy (%q)", hostPort, msg)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *healthBalancer) countUnhealthy() (count int) {
|
|
||||||
b.unhealthyMu.RLock()
|
|
||||||
count = len(b.unhealthyHostPorts)
|
|
||||||
b.unhealthyMu.RUnlock()
|
|
||||||
return count
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *healthBalancer) isUnhealthy(hostPort string) (unhealthy bool) {
|
|
||||||
b.unhealthyMu.RLock()
|
|
||||||
_, unhealthy = b.unhealthyHostPorts[hostPort]
|
|
||||||
b.unhealthyMu.RUnlock()
|
|
||||||
return unhealthy
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *healthBalancer) cleanupUnhealthy() {
|
|
||||||
b.unhealthyMu.Lock()
|
|
||||||
for k, v := range b.unhealthyHostPorts {
|
|
||||||
if time.Since(v) > b.healthCheckTimeout {
|
|
||||||
delete(b.unhealthyHostPorts, k)
|
|
||||||
logger.Lvl(4).Infof("clientv3/balancer: removed %q from unhealthy after %v", k, b.healthCheckTimeout)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
b.unhealthyMu.Unlock()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *healthBalancer) liveAddrs() ([]grpc.Address, map[string]struct{}) {
|
|
||||||
unhealthyCnt := b.countUnhealthy()
|
|
||||||
|
|
||||||
b.mu.RLock()
|
|
||||||
defer b.mu.RUnlock()
|
|
||||||
|
|
||||||
hbAddrs := b.addrs
|
|
||||||
if len(b.addrs) == 1 || unhealthyCnt == 0 || unhealthyCnt == len(b.addrs) {
|
|
||||||
liveHostPorts := make(map[string]struct{}, len(b.hostPort2ep))
|
|
||||||
for k := range b.hostPort2ep {
|
|
||||||
liveHostPorts[k] = struct{}{}
|
|
||||||
}
|
|
||||||
return hbAddrs, liveHostPorts
|
|
||||||
}
|
|
||||||
|
|
||||||
addrs := make([]grpc.Address, 0, len(b.addrs)-unhealthyCnt)
|
|
||||||
liveHostPorts := make(map[string]struct{}, len(addrs))
|
|
||||||
for _, addr := range b.addrs {
|
|
||||||
if !b.isUnhealthy(addr.Addr) {
|
|
||||||
addrs = append(addrs, addr)
|
|
||||||
liveHostPorts[addr.Addr] = struct{}{}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return addrs, liveHostPorts
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *healthBalancer) updateUnhealthy() {
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case <-time.After(b.healthCheckTimeout):
|
|
||||||
b.cleanupUnhealthy()
|
|
||||||
pinned := b.pinned()
|
|
||||||
if pinned == "" || b.isUnhealthy(pinned) {
|
|
||||||
select {
|
|
||||||
case b.updateAddrsC <- notifyNext:
|
|
||||||
case <-b.stopc:
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case <-b.stopc:
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *healthBalancer) updateAddrs(eps ...string) {
|
|
||||||
np := getHostPort2ep(eps)
|
|
||||||
|
|
||||||
b.mu.Lock()
|
|
||||||
defer b.mu.Unlock()
|
|
||||||
|
|
||||||
match := len(np) == len(b.hostPort2ep)
|
|
||||||
if match {
|
|
||||||
for k, v := range np {
|
|
||||||
if b.hostPort2ep[k] != v {
|
|
||||||
match = false
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if match {
|
|
||||||
// same endpoints, so no need to update address
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
b.hostPort2ep = np
|
|
||||||
b.addrs, b.eps = eps2addrs(eps), eps
|
|
||||||
|
|
||||||
b.unhealthyMu.Lock()
|
|
||||||
b.unhealthyHostPorts = make(map[string]time.Time)
|
|
||||||
b.unhealthyMu.Unlock()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *healthBalancer) next() {
|
|
||||||
b.mu.RLock()
|
|
||||||
downc := b.downc
|
|
||||||
b.mu.RUnlock()
|
|
||||||
select {
|
|
||||||
case b.updateAddrsC <- notifyNext:
|
|
||||||
case <-b.stopc:
|
|
||||||
}
|
|
||||||
// wait until disconnect so new RPCs are not issued on old connection
|
|
||||||
select {
|
|
||||||
case <-downc:
|
|
||||||
case <-b.stopc:
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *healthBalancer) updateNotifyLoop() {
|
|
||||||
defer close(b.donec)
|
|
||||||
|
|
||||||
for {
|
|
||||||
b.mu.RLock()
|
|
||||||
upc, downc, addr := b.upc, b.downc, b.pinAddr
|
|
||||||
b.mu.RUnlock()
|
|
||||||
// downc or upc should be closed
|
|
||||||
select {
|
|
||||||
case <-downc:
|
|
||||||
downc = nil
|
|
||||||
default:
|
|
||||||
}
|
|
||||||
select {
|
|
||||||
case <-upc:
|
|
||||||
upc = nil
|
|
||||||
default:
|
|
||||||
}
|
|
||||||
switch {
|
|
||||||
case downc == nil && upc == nil:
|
|
||||||
// stale
|
|
||||||
select {
|
|
||||||
case <-b.stopc:
|
|
||||||
return
|
|
||||||
default:
|
|
||||||
}
|
|
||||||
case downc == nil:
|
|
||||||
b.notifyAddrs(notifyReset)
|
|
||||||
select {
|
|
||||||
case <-upc:
|
|
||||||
case msg := <-b.updateAddrsC:
|
|
||||||
b.notifyAddrs(msg)
|
|
||||||
case <-b.stopc:
|
|
||||||
return
|
|
||||||
}
|
|
||||||
case upc == nil:
|
|
||||||
select {
|
|
||||||
// close connections that are not the pinned address
|
|
||||||
case b.notifyCh <- []grpc.Address{{Addr: addr}}:
|
|
||||||
case <-downc:
|
|
||||||
case <-b.stopc:
|
|
||||||
return
|
|
||||||
}
|
|
||||||
select {
|
|
||||||
case <-downc:
|
|
||||||
b.notifyAddrs(notifyReset)
|
|
||||||
case msg := <-b.updateAddrsC:
|
|
||||||
b.notifyAddrs(msg)
|
|
||||||
case <-b.stopc:
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *healthBalancer) notifyAddrs(msg notifyMsg) {
|
|
||||||
if msg == notifyNext {
|
|
||||||
select {
|
|
||||||
case b.notifyCh <- []grpc.Address{}:
|
|
||||||
case <-b.stopc:
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
b.mu.RLock()
|
|
||||||
pinAddr := b.pinAddr
|
|
||||||
downc := b.downc
|
|
||||||
b.mu.RUnlock()
|
|
||||||
addrs, hostPorts := b.liveAddrs()
|
|
||||||
|
|
||||||
var waitDown bool
|
|
||||||
if pinAddr != "" {
|
|
||||||
_, ok := hostPorts[pinAddr]
|
|
||||||
waitDown = !ok
|
|
||||||
}
|
|
||||||
|
|
||||||
select {
|
|
||||||
case b.notifyCh <- addrs:
|
|
||||||
if waitDown {
|
|
||||||
select {
|
|
||||||
case <-downc:
|
|
||||||
case <-b.stopc:
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case <-b.stopc:
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *healthBalancer) Up(addr grpc.Address) func(error) {
|
|
||||||
if !b.mayPin(addr) {
|
|
||||||
return func(err error) {}
|
|
||||||
}
|
|
||||||
|
|
||||||
b.mu.Lock()
|
|
||||||
defer b.mu.Unlock()
|
|
||||||
|
|
||||||
// gRPC might call Up after it called Close. We add this check
|
|
||||||
// to "fix" it up at application layer. Otherwise, will panic
|
|
||||||
// if b.upc is already closed.
|
|
||||||
if b.closed {
|
|
||||||
return func(err error) {}
|
|
||||||
}
|
|
||||||
|
|
||||||
// gRPC might call Up on a stale address.
|
|
||||||
// Prevent updating pinAddr with a stale address.
|
|
||||||
if !hasAddr(b.addrs, addr.Addr) {
|
|
||||||
return func(err error) {}
|
|
||||||
}
|
|
||||||
|
|
||||||
if b.pinAddr != "" {
|
|
||||||
logger.Lvl(4).Infof("clientv3/balancer: %q is up but not pinned (already pinned %q)", addr.Addr, b.pinAddr)
|
|
||||||
return func(err error) {}
|
|
||||||
}
|
|
||||||
|
|
||||||
// notify waiting Get()s and pin first connected address
|
|
||||||
close(b.upc)
|
|
||||||
b.downc = make(chan struct{})
|
|
||||||
b.pinAddr = addr.Addr
|
|
||||||
logger.Lvl(4).Infof("clientv3/balancer: pin %q", addr.Addr)
|
|
||||||
|
|
||||||
// notify client that a connection is up
|
|
||||||
b.readyOnce.Do(func() { close(b.readyc) })
|
|
||||||
|
|
||||||
return func(err error) {
|
|
||||||
// If connected to a black hole endpoint or a killed server, the gRPC ping
|
|
||||||
// timeout will induce a network I/O error, and retrying until success;
|
|
||||||
// finding healthy endpoint on retry could take several timeouts and redials.
|
|
||||||
// To avoid wasting retries, gray-list unhealthy endpoints.
|
|
||||||
b.hostPortError(addr.Addr, err)
|
|
||||||
|
|
||||||
b.mu.Lock()
|
|
||||||
b.upc = make(chan struct{})
|
|
||||||
close(b.downc)
|
|
||||||
b.pinAddr = ""
|
|
||||||
b.mu.Unlock()
|
|
||||||
logger.Lvl(4).Infof("clientv3/balancer: unpin %q (%q)", addr.Addr, err.Error())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *healthBalancer) mayPin(addr grpc.Address) bool {
    if b.endpoint(addr.Addr) == "" { // stale host:port
        return false
    }

    b.unhealthyMu.RLock()
    unhealthyCnt := len(b.unhealthyHostPorts)
    failedTime, bad := b.unhealthyHostPorts[addr.Addr]
    b.unhealthyMu.RUnlock()

    b.mu.RLock()
    skip := len(b.addrs) == 1 || unhealthyCnt == 0 || len(b.addrs) == unhealthyCnt
    b.mu.RUnlock()
    if skip || !bad {
        return true
    }

    // prevent isolated member's endpoint from being infinitely retried, as follows:
    //   1. keepalive pings detects GoAway with http2.ErrCodeEnhanceYourCalm
    //   2. balancer 'Up' unpins with grpc: failed with network I/O error
    //   3. grpc-healthcheck still SERVING, thus retry to pin
    // instead, return before grpc-healthcheck if failed within healthcheck timeout
    if elapsed := time.Since(failedTime); elapsed < b.healthCheckTimeout {
        logger.Lvl(4).Infof("clientv3/balancer: %q is up but not pinned (failed %v ago, require minimum %v after failure)", addr.Addr, elapsed, b.healthCheckTimeout)
        return false
    }

    if ok, _ := b.healthCheck(addr.Addr); ok {
        b.removeUnhealthy(addr.Addr, "health check success")
        return true
    }

    b.hostPortError(addr.Addr, errors.New("health check failed"))
    return false
}

func (b *healthBalancer) Get(ctx context.Context, opts grpc.BalancerGetOptions) (grpc.Address, func(), error) {
    var (
        addr   string
        closed bool
    )

    // If opts.BlockingWait is false (for fail-fast RPCs), it should return
    // an address it has notified via Notify immediately instead of blocking.
    if !opts.BlockingWait {
        b.mu.RLock()
        closed = b.closed
        addr = b.pinAddr
        b.mu.RUnlock()
        if closed {
            return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing
        }
        if addr == "" {
            return grpc.Address{Addr: ""}, nil, ErrNoAddrAvilable
        }
        return grpc.Address{Addr: addr}, func() {}, nil
    }

    for {
        b.mu.RLock()
        ch := b.upc
        b.mu.RUnlock()
        select {
        case <-ch:
        case <-b.donec:
            return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing
        case <-ctx.Done():
            return grpc.Address{Addr: ""}, nil, ctx.Err()
        }
        b.mu.RLock()
        closed = b.closed
        addr = b.pinAddr
        b.mu.RUnlock()
        // Close() which sets b.closed = true can be called before Get(), Get() must exit if balancer is closed.
        if closed {
            return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing
        }
        if addr != "" {
            break
        }
    }
    return grpc.Address{Addr: addr}, func() {}, nil
}

func (b *healthBalancer) Notify() <-chan []grpc.Address { return b.notifyCh }

func (b *healthBalancer) Close() error {
    b.mu.Lock()
    // In case gRPC calls close twice. TODO: remove the checking
    // when we are sure that gRPC wont call close twice.
    if b.closed {
        b.mu.Unlock()
        <-b.donec
        return nil
    }
    b.closed = true
    b.stopOnce.Do(func() { close(b.stopc) })
    b.pinAddr = ""

    // In the case of following scenario:
    //  1. upc is not closed; no pinned address
    //  2. client issues an RPC, calling invoke(), which calls Get(), enters for loop, blocks
    //  3. client.conn.Close() calls balancer.Close(); closed = true
    //  4. for loop in Get() never exits since ctx is the context passed in by the client and may not be canceled
    // we must close upc so Get() exits from blocking on upc
    select {
    case <-b.upc:
    default:
        // terminate all waiting Get()s
        close(b.upc)
    }

    b.mu.Unlock()
    b.wg.Wait()

    // wait for updateNotifyLoop to finish
    <-b.donec
    close(b.notifyCh)

    return nil
}

func grpcHealthCheck(client *Client, ep string) (bool, error) {
    conn, err := client.dial(ep)
    if err != nil {
        return false, err
    }
    defer conn.Close()
    cli := healthpb.NewHealthClient(conn)
    ctx, cancel := context.WithTimeout(context.Background(), time.Second)
    resp, err := cli.Check(ctx, &healthpb.HealthCheckRequest{})
    cancel()
    if err != nil {
        if s, ok := status.FromError(err); ok && s.Code() == codes.Unavailable {
            if s.Message() == unknownService { // etcd < v3.3.0
                return true, nil
            }
        }
        return false, err
    }
    return resp.Status == healthpb.HealthCheckResponse_SERVING, nil
}

func hasAddr(addrs []grpc.Address, targetAddr string) bool {
    for _, addr := range addrs {
        if targetAddr == addr.Addr {
            return true
        }
    }
    return false
}

func getHost(ep string) string {
    url, uerr := url.Parse(ep)
    if uerr != nil || !strings.Contains(ep, "://") {
        return ep
    }
    return url.Host
}

func eps2addrs(eps []string) []grpc.Address {
    addrs := make([]grpc.Address, len(eps))
    for i := range eps {
        addrs[i].Addr = getHost(eps[i])
    }
    return addrs
}

func getHostPort2ep(eps []string) map[string]string {
    hm := make(map[string]string, len(eps))
    for i := range eps {
        _, host, _ := parseEndpoint(eps[i])
        hm[host] = eps[i]
    }
    return hm
}
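For reference (not part of the vendored diff): a minimal standalone sketch of the same gRPC health probe that grpcHealthCheck above performs against an etcd endpoint. The endpoint, the insecure dial option, and the one-second timeout are illustrative assumptions.

package main

import (
    "context"
    "fmt"
    "time"

    "google.golang.org/grpc"
    healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

// probe reports whether the gRPC health service at endpoint answers SERVING.
func probe(endpoint string) (bool, error) {
    ctx, cancel := context.WithTimeout(context.Background(), time.Second)
    defer cancel()

    // Dial without TLS; a real client would reuse the etcd client's credentials.
    conn, err := grpc.DialContext(ctx, endpoint, grpc.WithInsecure(), grpc.WithBlock())
    if err != nil {
        return false, err
    }
    defer conn.Close()

    resp, err := healthpb.NewHealthClient(conn).Check(ctx, &healthpb.HealthCheckRequest{})
    if err != nil {
        return false, err
    }
    return resp.Status == healthpb.HealthCheckResponse_SERVING, nil
}

func main() {
    ok, err := probe("127.0.0.1:2379")
    fmt.Println(ok, err)
}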
70 vendor/github.com/coreos/etcd/clientv3/logger.go generated vendored
@@ -18,28 +18,14 @@ import (
    "io/ioutil"
    "sync"

    "github.com/coreos/etcd/pkg/logutil"

    "google.golang.org/grpc/grpclog"
)

// Logger is the logger used by client library.
// It implements grpclog.LoggerV2 interface.
type Logger interface {
    grpclog.LoggerV2

    // Lvl returns logger if logger's verbosity level >= "lvl".
    // Otherwise, logger that discards all logs.
    Lvl(lvl int) Logger

    // to satisfy capnslog

    Print(args ...interface{})
    Printf(format string, args ...interface{})
    Println(args ...interface{})
}

var (
    loggerMu sync.RWMutex
    lgMu sync.RWMutex
    logger Logger
    lg logutil.Logger
)

type settableLogger struct {
@@ -49,29 +35,29 @@ type settableLogger struct {

func init() {
    // disable client side logs by default
    logger = &settableLogger{}
    lg = &settableLogger{}
    SetLogger(grpclog.NewLoggerV2(ioutil.Discard, ioutil.Discard, ioutil.Discard))
}

// SetLogger sets client-side Logger.
func SetLogger(l grpclog.LoggerV2) {
    loggerMu.Lock()
    lgMu.Lock()
    logger = NewLogger(l)
    lg = logutil.NewLogger(l)
    // override grpclog so that any changes happen with locking
    grpclog.SetLoggerV2(logger)
    grpclog.SetLoggerV2(lg)
    loggerMu.Unlock()
    lgMu.Unlock()
}

// GetLogger returns the current logger.
// GetLogger returns the current logutil.Logger.
func GetLogger() Logger {
func GetLogger() logutil.Logger {
    loggerMu.RLock()
    lgMu.RLock()
    l := logger
    l := lg
    loggerMu.RUnlock()
    lgMu.RUnlock()
    return l
}

// NewLogger returns a new Logger with grpclog.LoggerV2.
// NewLogger returns a new Logger with logutil.Logger.
func NewLogger(gl grpclog.LoggerV2) Logger {
func NewLogger(gl grpclog.LoggerV2) logutil.Logger {
    return &settableLogger{l: gl}
}

@@ -104,32 +90,12 @@ func (s *settableLogger) Print(args ...interface{}) { s.get().In
func (s *settableLogger) Printf(format string, args ...interface{}) { s.get().Infof(format, args...) }
func (s *settableLogger) Println(args ...interface{}) { s.get().Infoln(args...) }
func (s *settableLogger) V(l int) bool { return s.get().V(l) }
func (s *settableLogger) Lvl(lvl int) Logger {
func (s *settableLogger) Lvl(lvl int) grpclog.LoggerV2 {
    s.mu.RLock()
    l := s.l
    s.mu.RUnlock()
    if l.V(lvl) {
        return s
    }
    return &noLogger{}
    return logutil.NewDiscardLogger()
}

type noLogger struct{}

func (*noLogger) Info(args ...interface{}) {}
func (*noLogger) Infof(format string, args ...interface{}) {}
func (*noLogger) Infoln(args ...interface{}) {}
func (*noLogger) Warning(args ...interface{}) {}
func (*noLogger) Warningf(format string, args ...interface{}) {}
func (*noLogger) Warningln(args ...interface{}) {}
func (*noLogger) Error(args ...interface{}) {}
func (*noLogger) Errorf(format string, args ...interface{}) {}
func (*noLogger) Errorln(args ...interface{}) {}
func (*noLogger) Fatal(args ...interface{}) {}
func (*noLogger) Fatalf(format string, args ...interface{}) {}
func (*noLogger) Fatalln(args ...interface{}) {}
func (*noLogger) Print(args ...interface{}) {}
func (*noLogger) Printf(format string, args ...interface{}) {}
func (*noLogger) Println(args ...interface{}) {}
func (*noLogger) V(l int) bool { return false }
func (ng *noLogger) Lvl(lvl int) Logger { return ng }
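For reference (not part of the vendored diff): a minimal sketch of wiring an application logger into the settable client logger shown above; routing everything to os.Stderr is an assumption.

package main

import (
    "os"

    "github.com/coreos/etcd/clientv3"
    "google.golang.org/grpc/grpclog"
)

func main() {
    // Route client-side etcd/gRPC logs to stderr instead of the default discard writers.
    clientv3.SetLogger(grpclog.NewLoggerV2(os.Stderr, os.Stderr, os.Stderr))
}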
10 vendor/github.com/coreos/etcd/clientv3/maintenance.go generated vendored
@@ -16,6 +16,7 @@ package clientv3

import (
    "context"
    "fmt"
    "io"

    pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
@@ -57,6 +58,8 @@ type Maintenance interface {
    HashKV(ctx context.Context, endpoint string, rev int64) (*HashKVResponse, error)

    // Snapshot provides a reader for a point-in-time snapshot of etcd.
    // If the context "ctx" is canceled or timed out, reading from returned
    // "io.ReadCloser" would error out (e.g. context.Canceled, context.DeadlineExceeded).
    Snapshot(ctx context.Context) (io.ReadCloser, error)

    // MoveLeader requests current leader to transfer its leadership to the transferee.
@@ -73,9 +76,9 @@ type maintenance struct {
func NewMaintenance(c *Client) Maintenance {
    api := &maintenance{
        dial: func(endpoint string) (pb.MaintenanceClient, func(), error) {
            conn, err := c.dial(endpoint)
            conn, err := c.Dial(endpoint)
            if err != nil {
                return nil, nil, err
                return nil, nil, fmt.Errorf("failed to dial endpoint %s with maintenance client: %v", endpoint, err)
            }
            cancel := func() { conn.Close() }
            return RetryMaintenanceClient(c, conn), cancel, nil
@@ -173,6 +176,7 @@ func (m *maintenance) Status(ctx context.Context, endpoint string) (*StatusRespo
func (m *maintenance) HashKV(ctx context.Context, endpoint string, rev int64) (*HashKVResponse, error) {
    remote, cancel, err := m.dial(endpoint)
    if err != nil {

        return nil, toErr(ctx, err)
    }
    defer cancel()
@@ -184,7 +188,7 @@ func (m *maintenance) HashKV(ctx context.Context, endpoint string, rev int64) (*
}

func (m *maintenance) Snapshot(ctx context.Context) (io.ReadCloser, error) {
    ss, err := m.remote.Snapshot(ctx, &pb.SnapshotRequest{}, m.callOpts...)
    ss, err := m.remote.Snapshot(ctx, &pb.SnapshotRequest{}, append(m.callOpts, withMax(defaultStreamMaxRetries))...)
    if err != nil {
        return nil, toErr(ctx, err)
    }
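For reference (not part of the vendored diff): a minimal sketch of the Snapshot contract documented above, where reads from the returned io.ReadCloser fail once ctx is canceled or times out. The endpoint, timeout, and output file name are assumptions.

package main

import (
    "context"
    "io"
    "log"
    "os"
    "time"

    "github.com/coreos/etcd/clientv3"
)

func main() {
    cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"127.0.0.1:2379"}})
    if err != nil {
        log.Fatal(err)
    }
    defer cli.Close()

    // If this context expires mid-transfer, the copy below returns the context error.
    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
    defer cancel()

    rc, err := cli.Snapshot(ctx)
    if err != nil {
        log.Fatal(err)
    }
    defer rc.Close()

    f, err := os.Create("snapshot.db")
    if err != nil {
        log.Fatal(err)
    }
    defer f.Close()

    if _, err := io.Copy(f, rc); err != nil {
        log.Fatal(err)
    }
}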
17 vendor/github.com/coreos/etcd/clientv3/op.go generated vendored
@@ -53,6 +53,12 @@ type Op struct {
    // for watch, put, delete
    prevKV bool

    // for watch
    // fragmentation should be disabled by default
    // if true, split watch events when total exceeds
    // "--max-request-bytes" flag value + 512-byte
    fragment bool

    // for put
    ignoreValue bool
    ignoreLease bool
@@ -511,3 +517,14 @@ func toLeaseTimeToLiveRequest(id LeaseID, opts ...LeaseOption) *pb.LeaseTimeToLi
    ret.applyOpts(opts)
    return &pb.LeaseTimeToLiveRequest{ID: int64(id), Keys: ret.attachedKeys}
}

// WithFragment to receive raw watch response with fragmentation.
// Fragmentation is disabled by default. If fragmentation is enabled,
// etcd watch server will split watch response before sending to clients
// when the total size of watch events exceed server-side request limit.
// The default server-side request limit is 1.5 MiB, which can be configured
// as "--max-request-bytes" flag value + gRPC-overhead 512 bytes.
// See "etcdserver/api/v3rpc/watch.go" for more details.
func WithFragment() OpOption {
    return func(op *Op) { op.fragment = true }
}
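For reference (not part of the vendored diff): a minimal sketch of enabling the fragmentation option added above on a watch. The endpoint and key prefix are assumptions.

package main

import (
    "context"
    "fmt"
    "log"

    "github.com/coreos/etcd/clientv3"
)

func main() {
    cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"127.0.0.1:2379"}})
    if err != nil {
        log.Fatal(err)
    }
    defer cli.Close()

    // With WithFragment, oversized watch responses are split by the server
    // instead of being rejected for exceeding the request size limit.
    wch := cli.Watch(context.Background(), "key-prefix/", clientv3.WithPrefix(), clientv3.WithFragment())
    for wresp := range wch {
        for _, ev := range wresp.Events {
            fmt.Printf("%s %q -> %q\n", ev.Type, ev.Kv.Key, ev.Kv.Value)
        }
    }
}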
28 vendor/github.com/coreos/etcd/clientv3/options.go generated vendored
@@ -16,17 +16,17 @@ package clientv3

import (
    "math"
    "time"

    "google.golang.org/grpc"
)

var (
    // Disable gRPC internal retrial logic
    // client-side handling retrying of request failures where data was not written to the wire or
    // TODO: enable when gRPC retry is stable (FailFast=false)
    // where server indicates it did not process the data. gRPC default is default is "FailFast(true)"
    // Reference:
    // but for etcd we default to "FailFast(false)" to minimize client request error responses due to
    // - https://github.com/grpc/grpc-go/issues/1532
    // transient failures.
    // - https://github.com/grpc/proposal/blob/master/A6-client-retries.md
    defaultFailFast = grpc.FailFast(false)
    defaultFailFast = grpc.FailFast(true)

    // client-side request send limit, gRPC default is math.MaxInt32
    // Make sure that "client-side send limit < server-side default send/recv limit"
@@ -38,6 +38,22 @@ var (
    // because range response can easily exceed request send limits
    // Default to math.MaxInt32; writes exceeding server-side send limit fails anyway
    defaultMaxCallRecvMsgSize = grpc.MaxCallRecvMsgSize(math.MaxInt32)

    // client-side non-streaming retry limit, only applied to requests where server responds with
    // a error code clearly indicating it was unable to process the request such as codes.Unavailable.
    // If set to 0, retry is disabled.
    defaultUnaryMaxRetries uint = 100

    // client-side streaming retry limit, only applied to requests where server responds with
    // a error code clearly indicating it was unable to process the request such as codes.Unavailable.
    // If set to 0, retry is disabled.
    defaultStreamMaxRetries = ^uint(0) // max uint

    // client-side retry backoff wait between requests.
    defaultBackoffWaitBetween = 25 * time.Millisecond

    // client-side retry backoff default jitter fraction.
    defaultBackoffJitterFraction = 0.10
)

// defaultCallOpts defines a list of default "gRPC.CallOption".
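For reference (not part of the vendored diff): the wait and jitter defaults above feed the retry interceptor added in this update. Below is a minimal sketch of one common way such parameters combine (exponential backoff with proportional jitter); the helper name and exact formula are illustrative assumptions, not the vendored implementation.

package main

import (
    "fmt"
    "math/rand"
    "time"
)

// backoff doubles the base wait per attempt and perturbs it by +/- jitterFraction.
func backoff(attempt uint, base time.Duration, jitterFraction float64) time.Duration {
    d := base << attempt // exponential growth per attempt
    jitter := 1 + jitterFraction*(rand.Float64()*2-1)
    return time.Duration(float64(d) * jitter)
}

func main() {
    for attempt := uint(0); attempt < 5; attempt++ {
        fmt.Println(backoff(attempt, 25*time.Millisecond, 0.10))
    }
}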
376 vendor/github.com/coreos/etcd/clientv3/retry.go generated vendored
@@ -32,465 +32,263 @@ const (
    nonRepeatable
)

type rpcFunc func(ctx context.Context) error
func (rp retryPolicy) String() string {
type retryRPCFunc func(context.Context, rpcFunc, retryPolicy) error
    switch rp {
type retryStopErrFunc func(error) bool
    case repeatable:
        return "repeatable"
    case nonRepeatable:
        return "nonRepeatable"
    default:
        return "UNKNOWN"
    }
}

// isSafeRetryImmutableRPC returns "true" when an immutable request is safe for retry.
//
// immutable requests (e.g. Get) should be retried unless it's
// an obvious server-side error (e.g. rpctypes.ErrRequestTooLarge).
//
// "isRepeatableStopError" returns "true" when an immutable request
// Returning "false" means retry should stop, since client cannot
// is interrupted by server-side or gRPC-side error and its status
// code is not transient (!= codes.Unavailable).
//
// Returning "true" means retry should stop, since client cannot
// handle itself even with retries.
func isRepeatableStopError(err error) bool {
func isSafeRetryImmutableRPC(err error) bool {
    eErr := rpctypes.Error(err)
    // always stop retry on etcd errors
    if serverErr, ok := eErr.(rpctypes.EtcdError); ok && serverErr.Code() != codes.Unavailable {
        return true
        // interrupted by non-transient server-side or gRPC-side error
        // client cannot handle itself (e.g. rpctypes.ErrCompacted)
        return false
    }
    // only retry if unavailable
    ev, _ := status.FromError(err)
    ev, ok := status.FromError(err)
    return ev.Code() != codes.Unavailable
    if !ok {
        // all errors from RPC is typed "grpc/status.(*statusError)"
        // (ref. https://github.com/grpc/grpc-go/pull/1782)
        //
        // if the error type is not "grpc/status.(*statusError)",
        // it could be from "Dial"
        // TODO: do not retry for now
        // ref. https://github.com/grpc/grpc-go/issues/1581
        return false
    }
    return ev.Code() == codes.Unavailable
}

// isSafeRetryMutableRPC returns "true" when a mutable request is safe for retry.
//
// mutable requests (e.g. Put, Delete, Txn) should only be retried
// when the status code is codes.Unavailable when initial connection
// has not been established (no pinned endpoint).
// has not been established (no endpoint is up).
//
// "isNonRepeatableStopError" returns "true" when a mutable request
// Returning "false" means retry should stop, otherwise it violates
// is interrupted by non-transient error that client cannot handle itself,
// or transient error while the connection has already been established
// (pinned endpoint exists).
//
// Returning "true" means retry should stop, otherwise it violates
// write-at-most-once semantics.
func isNonRepeatableStopError(err error) bool {
func isSafeRetryMutableRPC(err error) bool {
    ev, _ := status.FromError(err)
    if ev, ok := status.FromError(err); ok && ev.Code() != codes.Unavailable {
    if ev.Code() != codes.Unavailable {
        // not safe for mutable RPCs
        return true
        // e.g. interrupted by non-transient error that client cannot handle itself,
        // or transient error while the connection has already been established
        return false
    }
    desc := rpctypes.ErrorDesc(err)
    return desc != "there is no address available" && desc != "there is no connection available"
    return desc == "there is no address available" || desc == "there is no connection available"
}

func (c *Client) newRetryWrapper() retryRPCFunc {
    return func(rpcCtx context.Context, f rpcFunc, rp retryPolicy) error {
        var isStop retryStopErrFunc
        switch rp {
        case repeatable:
            isStop = isRepeatableStopError
        case nonRepeatable:
            isStop = isNonRepeatableStopError
        }
        for {
            if err := readyWait(rpcCtx, c.ctx, c.balancer.ConnectNotify()); err != nil {
                return err
            }
            pinned := c.balancer.pinned()
            err := f(rpcCtx)
            if err == nil {
                return nil
            }
            logger.Lvl(4).Infof("clientv3/retry: error %q on pinned endpoint %q", err.Error(), pinned)

            if s, ok := status.FromError(err); ok && (s.Code() == codes.Unavailable || s.Code() == codes.DeadlineExceeded || s.Code() == codes.Internal) {
                // mark this before endpoint switch is triggered
                c.balancer.hostPortError(pinned, err)
                c.balancer.next()
                logger.Lvl(4).Infof("clientv3/retry: switching from %q due to error %q", pinned, err.Error())
            }

            if isStop(err) {
                return err
            }
        }
    }
}

func (c *Client) newAuthRetryWrapper(retryf retryRPCFunc) retryRPCFunc {
    return func(rpcCtx context.Context, f rpcFunc, rp retryPolicy) error {
        for {
            pinned := c.balancer.pinned()
            err := retryf(rpcCtx, f, rp)
            if err == nil {
                return nil
            }
            logger.Lvl(4).Infof("clientv3/auth-retry: error %q on pinned endpoint %q", err.Error(), pinned)
            // always stop retry on etcd errors other than invalid auth token
            if rpctypes.Error(err) == rpctypes.ErrInvalidAuthToken {
                gterr := c.getToken(rpcCtx)
                if gterr != nil {
                    logger.Lvl(4).Infof("clientv3/auth-retry: cannot retry due to error %q(%q) on pinned endpoint %q", err.Error(), gterr.Error(), pinned)
                    return err // return the original error for simplicity
                }
                continue
            }
            return err
        }
    }
}

type retryKVClient struct {
    kc pb.KVClient
    retryf retryRPCFunc
}

// RetryKVClient implements a KVClient.
func RetryKVClient(c *Client) pb.KVClient {
    return &retryKVClient{
        kc: pb.NewKVClient(c.conn),
        retryf: c.newAuthRetryWrapper(c.newRetryWrapper()),
    }
}
func (rkv *retryKVClient) Range(ctx context.Context, in *pb.RangeRequest, opts ...grpc.CallOption) (resp *pb.RangeResponse, err error) {
    err = rkv.retryf(ctx, func(rctx context.Context) error {
    return rkv.kc.Range(ctx, in, append(opts, withRetryPolicy(repeatable))...)
        resp, err = rkv.kc.Range(rctx, in, opts...)
        return err
    }, repeatable)
    return resp, err
}

func (rkv *retryKVClient) Put(ctx context.Context, in *pb.PutRequest, opts ...grpc.CallOption) (resp *pb.PutResponse, err error) {
    err = rkv.retryf(ctx, func(rctx context.Context) error {
    return rkv.kc.Put(ctx, in, opts...)
        resp, err = rkv.kc.Put(rctx, in, opts...)
        return err
    }, nonRepeatable)
    return resp, err
}

func (rkv *retryKVClient) DeleteRange(ctx context.Context, in *pb.DeleteRangeRequest, opts ...grpc.CallOption) (resp *pb.DeleteRangeResponse, err error) {
    err = rkv.retryf(ctx, func(rctx context.Context) error {
    return rkv.kc.DeleteRange(ctx, in, opts...)
        resp, err = rkv.kc.DeleteRange(rctx, in, opts...)
        return err
    }, nonRepeatable)
    return resp, err
}

func (rkv *retryKVClient) Txn(ctx context.Context, in *pb.TxnRequest, opts ...grpc.CallOption) (resp *pb.TxnResponse, err error) {
    // TODO: "repeatable" for read-only txn
    return rkv.kc.Txn(ctx, in, opts...)
    err = rkv.retryf(ctx, func(rctx context.Context) error {
        resp, err = rkv.kc.Txn(rctx, in, opts...)
        return err
    }, nonRepeatable)
    return resp, err
}

func (rkv *retryKVClient) Compact(ctx context.Context, in *pb.CompactionRequest, opts ...grpc.CallOption) (resp *pb.CompactionResponse, err error) {
    err = rkv.retryf(ctx, func(rctx context.Context) error {
    return rkv.kc.Compact(ctx, in, opts...)
        resp, err = rkv.kc.Compact(rctx, in, opts...)
        return err
    }, nonRepeatable)
    return resp, err
}

type retryLeaseClient struct {
    lc pb.LeaseClient
    retryf retryRPCFunc
}

// RetryLeaseClient implements a LeaseClient.
func RetryLeaseClient(c *Client) pb.LeaseClient {
    return &retryLeaseClient{
        lc: pb.NewLeaseClient(c.conn),
        retryf: c.newAuthRetryWrapper(c.newRetryWrapper()),
    }
}

func (rlc *retryLeaseClient) LeaseTimeToLive(ctx context.Context, in *pb.LeaseTimeToLiveRequest, opts ...grpc.CallOption) (resp *pb.LeaseTimeToLiveResponse, err error) {
    err = rlc.retryf(ctx, func(rctx context.Context) error {
    return rlc.lc.LeaseTimeToLive(ctx, in, append(opts, withRetryPolicy(repeatable))...)
        resp, err = rlc.lc.LeaseTimeToLive(rctx, in, opts...)
        return err
    }, repeatable)
    return resp, err
}

func (rlc *retryLeaseClient) LeaseLeases(ctx context.Context, in *pb.LeaseLeasesRequest, opts ...grpc.CallOption) (resp *pb.LeaseLeasesResponse, err error) {
    err = rlc.retryf(ctx, func(rctx context.Context) error {
    return rlc.lc.LeaseLeases(ctx, in, append(opts, withRetryPolicy(repeatable))...)
        resp, err = rlc.lc.LeaseLeases(rctx, in, opts...)
        return err
    }, repeatable)
    return resp, err
}

func (rlc *retryLeaseClient) LeaseGrant(ctx context.Context, in *pb.LeaseGrantRequest, opts ...grpc.CallOption) (resp *pb.LeaseGrantResponse, err error) {
    err = rlc.retryf(ctx, func(rctx context.Context) error {
    return rlc.lc.LeaseGrant(ctx, in, append(opts, withRetryPolicy(repeatable))...)
        resp, err = rlc.lc.LeaseGrant(rctx, in, opts...)
        return err
    }, repeatable)
    return resp, err

}

func (rlc *retryLeaseClient) LeaseRevoke(ctx context.Context, in *pb.LeaseRevokeRequest, opts ...grpc.CallOption) (resp *pb.LeaseRevokeResponse, err error) {
    err = rlc.retryf(ctx, func(rctx context.Context) error {
    return rlc.lc.LeaseRevoke(ctx, in, append(opts, withRetryPolicy(repeatable))...)
        resp, err = rlc.lc.LeaseRevoke(rctx, in, opts...)
        return err
    }, repeatable)
    return resp, err
}

func (rlc *retryLeaseClient) LeaseKeepAlive(ctx context.Context, opts ...grpc.CallOption) (stream pb.Lease_LeaseKeepAliveClient, err error) {
    err = rlc.retryf(ctx, func(rctx context.Context) error {
    return rlc.lc.LeaseKeepAlive(ctx, append(opts, withRetryPolicy(repeatable))...)
        stream, err = rlc.lc.LeaseKeepAlive(rctx, opts...)
        return err
    }, repeatable)
    return stream, err
}

type retryClusterClient struct {
    cc pb.ClusterClient
    retryf retryRPCFunc
}

// RetryClusterClient implements a ClusterClient.
func RetryClusterClient(c *Client) pb.ClusterClient {
    return &retryClusterClient{
        cc: pb.NewClusterClient(c.conn),
        retryf: c.newRetryWrapper(),
    }
}

func (rcc *retryClusterClient) MemberList(ctx context.Context, in *pb.MemberListRequest, opts ...grpc.CallOption) (resp *pb.MemberListResponse, err error) {
    err = rcc.retryf(ctx, func(rctx context.Context) error {
    return rcc.cc.MemberList(ctx, in, append(opts, withRetryPolicy(repeatable))...)
        resp, err = rcc.cc.MemberList(rctx, in, opts...)
        return err
    }, repeatable)
    return resp, err
}

func (rcc *retryClusterClient) MemberAdd(ctx context.Context, in *pb.MemberAddRequest, opts ...grpc.CallOption) (resp *pb.MemberAddResponse, err error) {
    err = rcc.retryf(ctx, func(rctx context.Context) error {
    return rcc.cc.MemberAdd(ctx, in, opts...)
        resp, err = rcc.cc.MemberAdd(rctx, in, opts...)
        return err
    }, nonRepeatable)
    return resp, err
}

func (rcc *retryClusterClient) MemberRemove(ctx context.Context, in *pb.MemberRemoveRequest, opts ...grpc.CallOption) (resp *pb.MemberRemoveResponse, err error) {
    err = rcc.retryf(ctx, func(rctx context.Context) error {
    return rcc.cc.MemberRemove(ctx, in, opts...)
        resp, err = rcc.cc.MemberRemove(rctx, in, opts...)
        return err
    }, nonRepeatable)
    return resp, err
}

func (rcc *retryClusterClient) MemberUpdate(ctx context.Context, in *pb.MemberUpdateRequest, opts ...grpc.CallOption) (resp *pb.MemberUpdateResponse, err error) {
    err = rcc.retryf(ctx, func(rctx context.Context) error {
    return rcc.cc.MemberUpdate(ctx, in, opts...)
        resp, err = rcc.cc.MemberUpdate(rctx, in, opts...)
        return err
    }, nonRepeatable)
    return resp, err
}

type retryMaintenanceClient struct {
    mc pb.MaintenanceClient
    retryf retryRPCFunc
}

// RetryMaintenanceClient implements a Maintenance.
func RetryMaintenanceClient(c *Client, conn *grpc.ClientConn) pb.MaintenanceClient {
    return &retryMaintenanceClient{
        mc: pb.NewMaintenanceClient(conn),
        retryf: c.newRetryWrapper(),
    }
}

func (rmc *retryMaintenanceClient) Alarm(ctx context.Context, in *pb.AlarmRequest, opts ...grpc.CallOption) (resp *pb.AlarmResponse, err error) {
    err = rmc.retryf(ctx, func(rctx context.Context) error {
    return rmc.mc.Alarm(ctx, in, append(opts, withRetryPolicy(repeatable))...)
        resp, err = rmc.mc.Alarm(rctx, in, opts...)
        return err
    }, repeatable)
    return resp, err
}

func (rmc *retryMaintenanceClient) Status(ctx context.Context, in *pb.StatusRequest, opts ...grpc.CallOption) (resp *pb.StatusResponse, err error) {
    err = rmc.retryf(ctx, func(rctx context.Context) error {
    return rmc.mc.Status(ctx, in, append(opts, withRetryPolicy(repeatable))...)
        resp, err = rmc.mc.Status(rctx, in, opts...)
        return err
    }, repeatable)
    return resp, err
}

func (rmc *retryMaintenanceClient) Hash(ctx context.Context, in *pb.HashRequest, opts ...grpc.CallOption) (resp *pb.HashResponse, err error) {
    err = rmc.retryf(ctx, func(rctx context.Context) error {
    return rmc.mc.Hash(ctx, in, append(opts, withRetryPolicy(repeatable))...)
        resp, err = rmc.mc.Hash(rctx, in, opts...)
        return err
    }, repeatable)
    return resp, err
}

func (rmc *retryMaintenanceClient) HashKV(ctx context.Context, in *pb.HashKVRequest, opts ...grpc.CallOption) (resp *pb.HashKVResponse, err error) {
    err = rmc.retryf(ctx, func(rctx context.Context) error {
    return rmc.mc.HashKV(ctx, in, append(opts, withRetryPolicy(repeatable))...)
        resp, err = rmc.mc.HashKV(rctx, in, opts...)
        return err
    }, repeatable)
    return resp, err
}

func (rmc *retryMaintenanceClient) Snapshot(ctx context.Context, in *pb.SnapshotRequest, opts ...grpc.CallOption) (stream pb.Maintenance_SnapshotClient, err error) {
    err = rmc.retryf(ctx, func(rctx context.Context) error {
    return rmc.mc.Snapshot(ctx, in, append(opts, withRetryPolicy(repeatable))...)
        stream, err = rmc.mc.Snapshot(rctx, in, opts...)
        return err
    }, repeatable)
    return stream, err
}

func (rmc *retryMaintenanceClient) MoveLeader(ctx context.Context, in *pb.MoveLeaderRequest, opts ...grpc.CallOption) (resp *pb.MoveLeaderResponse, err error) {
    err = rmc.retryf(ctx, func(rctx context.Context) error {
    return rmc.mc.MoveLeader(ctx, in, append(opts, withRetryPolicy(repeatable))...)
        resp, err = rmc.mc.MoveLeader(rctx, in, opts...)
        return err
    }, repeatable)
    return resp, err
}

func (rmc *retryMaintenanceClient) Defragment(ctx context.Context, in *pb.DefragmentRequest, opts ...grpc.CallOption) (resp *pb.DefragmentResponse, err error) {
    err = rmc.retryf(ctx, func(rctx context.Context) error {
    return rmc.mc.Defragment(ctx, in, opts...)
        resp, err = rmc.mc.Defragment(rctx, in, opts...)
        return err
    }, nonRepeatable)
    return resp, err
}

type retryAuthClient struct {
    ac pb.AuthClient
    retryf retryRPCFunc
}

// RetryAuthClient implements a AuthClient.
func RetryAuthClient(c *Client) pb.AuthClient {
    return &retryAuthClient{
        ac: pb.NewAuthClient(c.conn),
        retryf: c.newRetryWrapper(),
    }
}

func (rac *retryAuthClient) UserList(ctx context.Context, in *pb.AuthUserListRequest, opts ...grpc.CallOption) (resp *pb.AuthUserListResponse, err error) {
    err = rac.retryf(ctx, func(rctx context.Context) error {
    return rac.ac.UserList(ctx, in, append(opts, withRetryPolicy(repeatable))...)
        resp, err = rac.ac.UserList(rctx, in, opts...)
        return err
    }, repeatable)
    return resp, err
}

func (rac *retryAuthClient) UserGet(ctx context.Context, in *pb.AuthUserGetRequest, opts ...grpc.CallOption) (resp *pb.AuthUserGetResponse, err error) {
    err = rac.retryf(ctx, func(rctx context.Context) error {
    return rac.ac.UserGet(ctx, in, append(opts, withRetryPolicy(repeatable))...)
        resp, err = rac.ac.UserGet(rctx, in, opts...)
        return err
    }, repeatable)
    return resp, err
}

func (rac *retryAuthClient) RoleGet(ctx context.Context, in *pb.AuthRoleGetRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleGetResponse, err error) {
    err = rac.retryf(ctx, func(rctx context.Context) error {
    return rac.ac.RoleGet(ctx, in, append(opts, withRetryPolicy(repeatable))...)
        resp, err = rac.ac.RoleGet(rctx, in, opts...)
        return err
    }, repeatable)
    return resp, err
}

func (rac *retryAuthClient) RoleList(ctx context.Context, in *pb.AuthRoleListRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleListResponse, err error) {
    err = rac.retryf(ctx, func(rctx context.Context) error {
    return rac.ac.RoleList(ctx, in, append(opts, withRetryPolicy(repeatable))...)
        resp, err = rac.ac.RoleList(rctx, in, opts...)
        return err
    }, repeatable)
    return resp, err
}

func (rac *retryAuthClient) AuthEnable(ctx context.Context, in *pb.AuthEnableRequest, opts ...grpc.CallOption) (resp *pb.AuthEnableResponse, err error) {
    err = rac.retryf(ctx, func(rctx context.Context) error {
    return rac.ac.AuthEnable(ctx, in, opts...)
        resp, err = rac.ac.AuthEnable(rctx, in, opts...)
        return err
    }, nonRepeatable)
    return resp, err
}

func (rac *retryAuthClient) AuthDisable(ctx context.Context, in *pb.AuthDisableRequest, opts ...grpc.CallOption) (resp *pb.AuthDisableResponse, err error) {
    err = rac.retryf(ctx, func(rctx context.Context) error {
    return rac.ac.AuthDisable(ctx, in, opts...)
        resp, err = rac.ac.AuthDisable(rctx, in, opts...)
        return err
    }, nonRepeatable)
    return resp, err
}

func (rac *retryAuthClient) UserAdd(ctx context.Context, in *pb.AuthUserAddRequest, opts ...grpc.CallOption) (resp *pb.AuthUserAddResponse, err error) {
    err = rac.retryf(ctx, func(rctx context.Context) error {
    return rac.ac.UserAdd(ctx, in, opts...)
        resp, err = rac.ac.UserAdd(rctx, in, opts...)
        return err
    }, nonRepeatable)
    return resp, err
}

func (rac *retryAuthClient) UserDelete(ctx context.Context, in *pb.AuthUserDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthUserDeleteResponse, err error) {
    err = rac.retryf(ctx, func(rctx context.Context) error {
    return rac.ac.UserDelete(ctx, in, opts...)
        resp, err = rac.ac.UserDelete(rctx, in, opts...)
        return err
    }, nonRepeatable)
    return resp, err
}

func (rac *retryAuthClient) UserChangePassword(ctx context.Context, in *pb.AuthUserChangePasswordRequest, opts ...grpc.CallOption) (resp *pb.AuthUserChangePasswordResponse, err error) {
    err = rac.retryf(ctx, func(rctx context.Context) error {
    return rac.ac.UserChangePassword(ctx, in, opts...)
        resp, err = rac.ac.UserChangePassword(rctx, in, opts...)
        return err
    }, nonRepeatable)
    return resp, err
}

func (rac *retryAuthClient) UserGrantRole(ctx context.Context, in *pb.AuthUserGrantRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserGrantRoleResponse, err error) {
    err = rac.retryf(ctx, func(rctx context.Context) error {
    return rac.ac.UserGrantRole(ctx, in, opts...)
        resp, err = rac.ac.UserGrantRole(rctx, in, opts...)
        return err
    }, nonRepeatable)
    return resp, err
}

func (rac *retryAuthClient) UserRevokeRole(ctx context.Context, in *pb.AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserRevokeRoleResponse, err error) {
    err = rac.retryf(ctx, func(rctx context.Context) error {
    return rac.ac.UserRevokeRole(ctx, in, opts...)
        resp, err = rac.ac.UserRevokeRole(rctx, in, opts...)
        return err
    }, nonRepeatable)
    return resp, err
}

func (rac *retryAuthClient) RoleAdd(ctx context.Context, in *pb.AuthRoleAddRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleAddResponse, err error) {
    err = rac.retryf(ctx, func(rctx context.Context) error {
    return rac.ac.RoleAdd(ctx, in, opts...)
        resp, err = rac.ac.RoleAdd(rctx, in, opts...)
        return err
    }, nonRepeatable)
    return resp, err
}

func (rac *retryAuthClient) RoleDelete(ctx context.Context, in *pb.AuthRoleDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleDeleteResponse, err error) {
    err = rac.retryf(ctx, func(rctx context.Context) error {
    return rac.ac.RoleDelete(ctx, in, opts...)
        resp, err = rac.ac.RoleDelete(rctx, in, opts...)
        return err
    }, nonRepeatable)
    return resp, err
}

func (rac *retryAuthClient) RoleGrantPermission(ctx context.Context, in *pb.AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleGrantPermissionResponse, err error) {
    err = rac.retryf(ctx, func(rctx context.Context) error {
    return rac.ac.RoleGrantPermission(ctx, in, opts...)
        resp, err = rac.ac.RoleGrantPermission(rctx, in, opts...)
        return err
    }, nonRepeatable)
    return resp, err
}

func (rac *retryAuthClient) RoleRevokePermission(ctx context.Context, in *pb.AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleRevokePermissionResponse, err error) {
    err = rac.retryf(ctx, func(rctx context.Context) error {
    return rac.ac.RoleRevokePermission(ctx, in, opts...)
        resp, err = rac.ac.RoleRevokePermission(rctx, in, opts...)
        return err
    }, nonRepeatable)
    return resp, err
}

func (rac *retryAuthClient) Authenticate(ctx context.Context, in *pb.AuthenticateRequest, opts ...grpc.CallOption) (resp *pb.AuthenticateResponse, err error) {
    err = rac.retryf(ctx, func(rctx context.Context) error {
    return rac.ac.Authenticate(ctx, in, opts...)
        resp, err = rac.ac.Authenticate(rctx, in, opts...)
        return err
    }, nonRepeatable)
    return resp, err
}
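For reference (not part of the vendored diff): a minimal sketch of the retry classification the renamed helpers above apply to read-only RPCs, where only a gRPC status error with codes.Unavailable is considered safe to retry. The function name is an illustrative assumption.

package main

import (
    "fmt"

    "google.golang.org/grpc/codes"
    "google.golang.org/grpc/status"
)

// retriableRead mirrors the immutable-RPC rule: retry only on codes.Unavailable.
func retriableRead(err error) bool {
    s, ok := status.FromError(err)
    if !ok {
        // not a gRPC status error (e.g. a dial error): do not retry
        return false
    }
    return s.Code() == codes.Unavailable
}

func main() {
    fmt.Println(retriableRead(status.Error(codes.Unavailable, "etcdserver: leader changed"))) // true
    fmt.Println(retriableRead(status.Error(codes.InvalidArgument, "bad request")))            // false
}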
389 vendor/github.com/coreos/etcd/clientv3/retry_interceptor.go generated vendored Normal file
@@ -0,0 +1,389 @@
|
|||||||
|
// Copyright 2016 The etcd Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
// Based on github.com/grpc-ecosystem/go-grpc-middleware/retry, but modified to support the more
|
||||||
|
// fine grained error checking required by write-at-most-once retry semantics of etcd.
|
||||||
|
|
||||||
|
package clientv3
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"io"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
|
||||||
|
"go.uber.org/zap"
|
||||||
|
"google.golang.org/grpc"
|
||||||
|
"google.golang.org/grpc/codes"
|
||||||
|
"google.golang.org/grpc/metadata"
|
||||||
|
"google.golang.org/grpc/status"
|
||||||
|
)
|
||||||
|
|
||||||
|
// unaryClientInterceptor returns a new retrying unary client interceptor.
|
||||||
|
//
|
||||||
|
// The default configuration of the interceptor is to not retry *at all*. This behaviour can be
|
||||||
|
// changed through options (e.g. WithMax) on creation of the interceptor or on call (through grpc.CallOptions).
|
||||||
|
func (c *Client) unaryClientInterceptor(logger *zap.Logger, optFuncs ...retryOption) grpc.UnaryClientInterceptor {
|
||||||
|
intOpts := reuseOrNewWithCallOptions(defaultOptions, optFuncs)
|
||||||
|
return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
|
||||||
|
grpcOpts, retryOpts := filterCallOptions(opts)
|
||||||
|
callOpts := reuseOrNewWithCallOptions(intOpts, retryOpts)
|
||||||
|
// short circuit for simplicity, and avoiding allocations.
|
||||||
|
if callOpts.max == 0 {
|
||||||
|
return invoker(ctx, method, req, reply, cc, grpcOpts...)
|
||||||
|
}
|
||||||
|
var lastErr error
|
||||||
|
for attempt := uint(0); attempt < callOpts.max; attempt++ {
|
||||||
|
if err := waitRetryBackoff(ctx, attempt, callOpts); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
logger.Debug(
|
||||||
|
"retrying of unary invoker",
|
||||||
|
zap.String("target", cc.Target()),
|
||||||
|
zap.Uint("attempt", attempt),
|
||||||
|
)
|
||||||
|
lastErr = invoker(ctx, method, req, reply, cc, grpcOpts...)
|
||||||
|
if lastErr == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
logger.Warn(
|
||||||
|
"retrying of unary invoker failed",
|
||||||
|
zap.String("target", cc.Target()),
|
||||||
|
zap.Uint("attempt", attempt),
|
||||||
|
zap.Error(lastErr),
|
||||||
|
)
|
||||||
|
if isContextError(lastErr) {
|
||||||
|
if ctx.Err() != nil {
|
||||||
|
// its the context deadline or cancellation.
|
||||||
|
return lastErr
|
||||||
|
}
|
||||||
|
// its the callCtx deadline or cancellation, in which case try again.
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if callOpts.retryAuth && rpctypes.Error(lastErr) == rpctypes.ErrInvalidAuthToken {
|
||||||
|
gterr := c.getToken(ctx)
|
||||||
|
if gterr != nil {
|
||||||
|
logger.Warn(
|
||||||
|
"retrying of unary invoker failed to fetch new auth token",
|
||||||
|
zap.String("target", cc.Target()),
|
||||||
|
zap.Error(gterr),
|
||||||
|
)
|
||||||
|
return gterr // lastErr must be invalid auth token
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if !isSafeRetry(c.lg, lastErr, callOpts) {
|
||||||
|
return lastErr
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return lastErr
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// streamClientInterceptor returns a new retrying stream client interceptor for server side streaming calls.
|
||||||
|
//
|
||||||
|
// The default configuration of the interceptor is to not retry *at all*. This behaviour can be
|
||||||
|
// changed through options (e.g. WithMax) on creation of the interceptor or on call (through grpc.CallOptions).
|
||||||
|
//
|
||||||
|
// Retry logic is available *only for ServerStreams*, i.e. 1:n streams, as the internal logic needs
|
||||||
|
// to buffer the messages sent by the client. If retry is enabled on any other streams (ClientStreams,
|
||||||
|
// BidiStreams), the retry interceptor will fail the call.
|
||||||
|
func (c *Client) streamClientInterceptor(logger *zap.Logger, optFuncs ...retryOption) grpc.StreamClientInterceptor {
|
||||||
|
intOpts := reuseOrNewWithCallOptions(defaultOptions, optFuncs)
|
||||||
|
return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
|
||||||
|
grpcOpts, retryOpts := filterCallOptions(opts)
|
||||||
|
callOpts := reuseOrNewWithCallOptions(intOpts, retryOpts)
|
||||||
|
// short circuit for simplicity, and avoiding allocations.
|
||||||
|
if callOpts.max == 0 {
|
||||||
|
return streamer(ctx, desc, cc, method, grpcOpts...)
|
||||||
|
}
|
||||||
|
if desc.ClientStreams {
|
||||||
|
return nil, status.Errorf(codes.Unimplemented, "clientv3/retry_interceptor: cannot retry on ClientStreams, set Disable()")
|
||||||
|
}
|
||||||
|
newStreamer, err := streamer(ctx, desc, cc, method, grpcOpts...)
|
||||||
|
logger.Warn("retry stream intercept", zap.Error(err))
|
||||||
|
if err != nil {
|
||||||
|
// TODO(mwitkow): Maybe dial and transport errors should be retriable?
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
retryingStreamer := &serverStreamingRetryingStream{
|
||||||
|
client: c,
|
||||||
|
ClientStream: newStreamer,
|
||||||
|
callOpts: callOpts,
|
||||||
|
ctx: ctx,
|
||||||
|
streamerCall: func(ctx context.Context) (grpc.ClientStream, error) {
|
||||||
|
return streamer(ctx, desc, cc, method, grpcOpts...)
|
||||||
|
},
|
||||||
|
}
|
||||||
|
return retryingStreamer, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// type serverStreamingRetryingStream is the implementation of grpc.ClientStream that acts as a
|
||||||
|
// proxy to the underlying call. If any of the RecvMsg() calls fail, it will try to reestablish
|
||||||
|
// a new ClientStream according to the retry policy.
|
||||||
|
type serverStreamingRetryingStream struct {
|
||||||
|
grpc.ClientStream
|
||||||
|
client *Client
|
||||||
|
bufferedSends []interface{} // single message that the client can sen
|
||||||
|
receivedGood bool // indicates whether any prior receives were successful
|
||||||
|
wasClosedSend bool // indicates that CloseSend was closed
|
||||||
|
ctx context.Context
|
||||||
|
callOpts *options
|
||||||
|
streamerCall func(ctx context.Context) (grpc.ClientStream, error)
|
||||||
|
mu sync.RWMutex
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *serverStreamingRetryingStream) setStream(clientStream grpc.ClientStream) {
	s.mu.Lock()
	s.ClientStream = clientStream
	s.mu.Unlock()
}

func (s *serverStreamingRetryingStream) getStream() grpc.ClientStream {
	s.mu.RLock()
	defer s.mu.RUnlock()
	return s.ClientStream
}

func (s *serverStreamingRetryingStream) SendMsg(m interface{}) error {
	s.mu.Lock()
	s.bufferedSends = append(s.bufferedSends, m)
	s.mu.Unlock()
	return s.getStream().SendMsg(m)
}

func (s *serverStreamingRetryingStream) CloseSend() error {
	s.mu.Lock()
	s.wasClosedSend = true
	s.mu.Unlock()
	return s.getStream().CloseSend()
}

func (s *serverStreamingRetryingStream) Header() (metadata.MD, error) {
	return s.getStream().Header()
}

func (s *serverStreamingRetryingStream) Trailer() metadata.MD {
	return s.getStream().Trailer()
}

func (s *serverStreamingRetryingStream) RecvMsg(m interface{}) error {
	attemptRetry, lastErr := s.receiveMsgAndIndicateRetry(m)
	if !attemptRetry {
		return lastErr // success or hard failure
	}
	// We start off from attempt 1, because zeroth was already made on normal SendMsg().
	for attempt := uint(1); attempt < s.callOpts.max; attempt++ {
		if err := waitRetryBackoff(s.ctx, attempt, s.callOpts); err != nil {
			return err
		}
		newStream, err := s.reestablishStreamAndResendBuffer(s.ctx)
		if err != nil {
			// TODO(mwitkow): Maybe dial and transport errors should be retriable?
			return err
		}
		s.setStream(newStream)
		attemptRetry, lastErr = s.receiveMsgAndIndicateRetry(m)
		//fmt.Printf("Received message and indicate: %v %v\n", attemptRetry, lastErr)
		if !attemptRetry {
			return lastErr
		}
	}
	return lastErr
}

func (s *serverStreamingRetryingStream) receiveMsgAndIndicateRetry(m interface{}) (bool, error) {
	s.mu.RLock()
	wasGood := s.receivedGood
	s.mu.RUnlock()
	err := s.getStream().RecvMsg(m)
	if err == nil || err == io.EOF {
		s.mu.Lock()
		s.receivedGood = true
		s.mu.Unlock()
		return false, err
	} else if wasGood {
		// previous RecvMsg in the stream succeeded, no retry logic should interfere
		return false, err
	}
	if isContextError(err) {
		if s.ctx.Err() != nil {
			return false, err
		}
		// it's the callCtx deadline or cancellation, in which case try again.
		return true, err
	}
	if s.callOpts.retryAuth && rpctypes.Error(err) == rpctypes.ErrInvalidAuthToken {
		gterr := s.client.getToken(s.ctx)
		if gterr != nil {
			s.client.lg.Warn("retry failed to fetch new auth token", zap.Error(gterr))
			return false, err // return the original error for simplicity
		}
		return true, err
	}
	return isSafeRetry(s.client.lg, err, s.callOpts), err
}

func (s *serverStreamingRetryingStream) reestablishStreamAndResendBuffer(callCtx context.Context) (grpc.ClientStream, error) {
	s.mu.RLock()
	bufferedSends := s.bufferedSends
	s.mu.RUnlock()
	newStream, err := s.streamerCall(callCtx)
	if err != nil {
		return nil, err
	}
	for _, msg := range bufferedSends {
		if err := newStream.SendMsg(msg); err != nil {
			return nil, err
		}
	}
	if err := newStream.CloseSend(); err != nil {
		return nil, err
	}
	return newStream, nil
}

func waitRetryBackoff(ctx context.Context, attempt uint, callOpts *options) error {
	waitTime := time.Duration(0)
	if attempt > 0 {
		waitTime = callOpts.backoffFunc(attempt)
	}
	if waitTime > 0 {
		timer := time.NewTimer(waitTime)
		select {
		case <-ctx.Done():
			timer.Stop()
			return contextErrToGrpcErr(ctx.Err())
		case <-timer.C:
		}
	}
	return nil
}

// isSafeRetry returns "true", if request is safe for retry with the given error.
func isSafeRetry(lg *zap.Logger, err error, callOpts *options) bool {
	if isContextError(err) {
		return false
	}
	switch callOpts.retryPolicy {
	case repeatable:
		return isSafeRetryImmutableRPC(err)
	case nonRepeatable:
		return isSafeRetryMutableRPC(err)
	default:
		lg.Warn("unrecognized retry policy", zap.String("retryPolicy", callOpts.retryPolicy.String()))
		return false
	}
}

func isContextError(err error) bool {
	return grpc.Code(err) == codes.DeadlineExceeded || grpc.Code(err) == codes.Canceled
}

func contextErrToGrpcErr(err error) error {
	switch err {
	case context.DeadlineExceeded:
		return status.Errorf(codes.DeadlineExceeded, err.Error())
	case context.Canceled:
		return status.Errorf(codes.Canceled, err.Error())
	default:
		return status.Errorf(codes.Unknown, err.Error())
	}
}

var (
	defaultOptions = &options{
		retryPolicy: nonRepeatable,
		max:         0, // disable
		backoffFunc: backoffLinearWithJitter(50*time.Millisecond /*jitter*/, 0.10),
		retryAuth:   true,
	}
)

// backoffFunc denotes a family of functions that control the backoff duration between call retries.
//
// They are called with an identifier of the attempt, and should return a time the system client should
// hold off for. If the time returned is longer than the `context.Context.Deadline` of the request
// the deadline of the request takes precedence and the wait will be interrupted before proceeding
// with the next iteration.
type backoffFunc func(attempt uint) time.Duration

// withRetryPolicy sets the retry policy of this call.
func withRetryPolicy(rp retryPolicy) retryOption {
	return retryOption{applyFunc: func(o *options) {
		o.retryPolicy = rp
	}}
}

// withMax sets the maximum number of retries on this call, or this interceptor.
func withMax(maxRetries uint) retryOption {
	return retryOption{applyFunc: func(o *options) {
		o.max = maxRetries
	}}
}

// withBackoff sets the `backoffFunc` used to control time between retries.
func withBackoff(bf backoffFunc) retryOption {
	return retryOption{applyFunc: func(o *options) {
		o.backoffFunc = bf
	}}
}

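// Illustrative sketch, not part of the vendored file: a backoffFunc only maps an
// attempt number to a wait duration, so a capped exponential policy can be swapped
// in through withBackoff. The constants are assumptions chosen for the example,
// not values used by etcd.
//
//	expBackoff := func(attempt uint) time.Duration {
//		d := 50 * time.Millisecond
//		for i := uint(0); i < attempt && d < 2*time.Second; i++ {
//			d *= 2
//		}
//		return d
//	}
//	callOpts := reuseOrNewWithCallOptions(defaultOptions, []retryOption{withMax(5), withBackoff(expBackoff)})
//	_ = callOpts
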
type options struct {
	retryPolicy retryPolicy
	max         uint
	backoffFunc backoffFunc
	retryAuth   bool
}

// retryOption is a grpc.CallOption that is local to clientv3's retry interceptor.
type retryOption struct {
	grpc.EmptyCallOption // make sure we implement private after() and before() fields so we don't panic.
	applyFunc            func(opt *options)
}

func reuseOrNewWithCallOptions(opt *options, retryOptions []retryOption) *options {
	if len(retryOptions) == 0 {
		return opt
	}
	optCopy := &options{}
	*optCopy = *opt
	for _, f := range retryOptions {
		f.applyFunc(optCopy)
	}
	return optCopy
}

func filterCallOptions(callOptions []grpc.CallOption) (grpcOptions []grpc.CallOption, retryOptions []retryOption) {
	for _, opt := range callOptions {
		if co, ok := opt.(retryOption); ok {
			retryOptions = append(retryOptions, co)
		} else {
			grpcOptions = append(grpcOptions, opt)
		}
	}
	return grpcOptions, retryOptions
}

// backoffLinearWithJitter waits a set period of time, allowing for jitter (fractional adjustment).
//
// For example waitBetween=1s and jitter=0.10 can generate waits between 900ms and 1100ms.
func backoffLinearWithJitter(waitBetween time.Duration, jitterFraction float64) backoffFunc {
	return func(attempt uint) time.Duration {
		return jitterUp(waitBetween, jitterFraction)
	}
}
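With the defaults above (waitBetween=50ms, jitterFraction=0.10) every retry waits for a
duration drawn uniformly from roughly [45ms, 55ms]. A standalone check of that range,
assuming only the jitterUp helper added in utils.go below (copied here so the sketch is
self-contained):

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// jitterUp mirrors the clientv3 helper: duration plus or minus duration*jitter, uniformly.
func jitterUp(duration time.Duration, jitter float64) time.Duration {
	multiplier := jitter * (rand.Float64()*2 - 1)
	return time.Duration(float64(duration) * (1 + multiplier))
}

func main() {
	for i := 0; i < 3; i++ {
		// each draw stays within [45ms, 55ms] for 50ms with 10% jitter
		fmt.Println(jitterUp(50*time.Millisecond, 0.10))
	}
}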
49
vendor/github.com/coreos/etcd/clientv3/utils.go
generated
vendored
Normal file
@@ -0,0 +1,49 @@
// Copyright 2018 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package clientv3

import (
	"math/rand"
	"reflect"
	"runtime"
	"strings"
	"time"
)

// jitterUp adds random jitter to the duration.
//
// This adds or subtracts time from the duration within a given jitter fraction.
// For example for 10s and jitter 0.1, it will return a time within [9s, 11s].
//
// Reference: https://godoc.org/github.com/grpc-ecosystem/go-grpc-middleware/util/backoffutils
func jitterUp(duration time.Duration, jitter float64) time.Duration {
	multiplier := jitter * (rand.Float64()*2 - 1)
	return time.Duration(float64(duration) * (1 + multiplier))
}

// isOpFuncCalled checks if the provided function is being called in the op options.
func isOpFuncCalled(op string, opts []OpOption) bool {
	for _, opt := range opts {
		v := reflect.ValueOf(opt)
		if v.Kind() == reflect.Func {
			if opFunc := runtime.FuncForPC(v.Pointer()); opFunc != nil {
				if strings.Contains(opFunc.Name(), op) {
					return true
				}
			}
		}
	}
	return false
}
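A sketch of how a caller inside the clientv3 package might use isOpFuncCalled; the
surrounding helper, its error message, and the extra "fmt" import are hypothetical,
only WithPrefix and OpOption come from the real API:

// requirePrefix is a hypothetical package-internal guard, shown only to
// illustrate isOpFuncCalled; it is not part of this change.
func requirePrefix(key string, opts ...OpOption) error {
	if !isOpFuncCalled("WithPrefix", opts) {
		return fmt.Errorf("clientv3: %q must be used with WithPrefix()", key)
	}
	return nil
}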
215
vendor/github.com/coreos/etcd/clientv3/watch.go
generated
vendored
@@ -16,6 +16,7 @@ package clientv3
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
@@ -46,8 +47,33 @@ type Watcher interface {
|
|||||||
// through the returned channel. If revisions waiting to be sent over the
|
// through the returned channel. If revisions waiting to be sent over the
|
||||||
// watch are compacted, then the watch will be canceled by the server, the
|
// watch are compacted, then the watch will be canceled by the server, the
|
||||||
// client will post a compacted error watch response, and the channel will close.
|
// client will post a compacted error watch response, and the channel will close.
|
||||||
|
// If the context "ctx" is canceled or timed out, returned "WatchChan" is closed,
|
||||||
|
// and "WatchResponse" from this closed channel has zero events and nil "Err()".
|
||||||
|
// The context "ctx" MUST be canceled, as soon as watcher is no longer being used,
|
||||||
|
// to release the associated resources.
|
||||||
|
//
|
||||||
|
// If the context is "context.Background/TODO", returned "WatchChan" will
|
||||||
|
// not be closed and block until event is triggered, except when server
|
||||||
|
// returns a non-recoverable error (e.g. ErrCompacted).
|
||||||
|
// For example, when context passed with "WithRequireLeader" and the
|
||||||
|
// connected server has no leader (e.g. due to network partition),
|
||||||
|
// error "etcdserver: no leader" (ErrNoLeader) will be returned,
|
||||||
|
// and then "WatchChan" is closed with non-nil "Err()".
|
||||||
|
// In order to prevent a watch stream being stuck in a partitioned node,
|
||||||
|
// make sure to wrap context with "WithRequireLeader".
|
||||||
|
//
|
||||||
|
// Otherwise, as long as the context has not been canceled or timed out,
|
||||||
|
// watch will retry on other recoverable errors forever until reconnected.
|
||||||
|
//
|
||||||
|
// TODO: explicitly set context error in the last "WatchResponse" message and close channel?
|
||||||
|
// Currently, client contexts are overwritten with "valCtx" that never closes.
|
||||||
|
// TODO(v3.4): configure watch retry policy, limit maximum retry number
|
||||||
|
// (see https://github.com/etcd-io/etcd/issues/8980)
|
||||||
Watch(ctx context.Context, key string, opts ...OpOption) WatchChan
|
Watch(ctx context.Context, key string, opts ...OpOption) WatchChan
|
||||||
|
|
||||||
|
// RequestProgress requests a progress notify response be sent in all watch channels.
|
||||||
|
RequestProgress(ctx context.Context) error
|
||||||
|
|
||||||
// Close closes the watcher and cancels all watch requests.
|
// Close closes the watcher and cancels all watch requests.
|
||||||
Close() error
|
Close() error
|
||||||
}
|
}
|
||||||
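The Watch contract spelled out above boils down to two rules for callers: always cancel
the context once the watcher is no longer needed, and wrap it with WithRequireLeader if
a watch must not hang on a partitioned member. A minimal sketch against the public
clientv3 API (the endpoint and key are assumptions for the example):

package main

import (
	"context"
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"127.0.0.1:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// WithRequireLeader makes the server cancel the watch with ErrNoLeader instead of
	// letting it hang when the connected member loses the leader; cancel releases the
	// watcher's resources once we are done.
	ctx, cancel := context.WithCancel(clientv3.WithRequireLeader(context.Background()))
	defer cancel()

	for resp := range cli.Watch(ctx, "foo", clientv3.WithPrefix()) {
		if err := resp.Err(); err != nil {
			log.Println("watch closed:", err)
			return
		}
		for _, ev := range resp.Events {
			log.Printf("%s %q=%q", ev.Type, ev.Kv.Key, ev.Kv.Value)
		}
	}
}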
@@ -134,7 +160,7 @@ type watchGrpcStream struct {
|
|||||||
resuming []*watcherStream
|
resuming []*watcherStream
|
||||||
|
|
||||||
// reqc sends a watch request from Watch() to the main goroutine
|
// reqc sends a watch request from Watch() to the main goroutine
|
||||||
reqc chan *watchRequest
|
reqc chan watchStreamRequest
|
||||||
// respc receives data from the watch client
|
// respc receives data from the watch client
|
||||||
respc chan *pb.WatchResponse
|
respc chan *pb.WatchResponse
|
||||||
// donec closes to broadcast shutdown
|
// donec closes to broadcast shutdown
|
||||||
@@ -152,16 +178,27 @@ type watchGrpcStream struct {
|
|||||||
closeErr error
|
closeErr error
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// watchStreamRequest is a union of the supported watch request operation types
|
||||||
|
type watchStreamRequest interface {
|
||||||
|
toPB() *pb.WatchRequest
|
||||||
|
}
|
||||||
|
|
||||||
// watchRequest is issued by the subscriber to start a new watcher
|
// watchRequest is issued by the subscriber to start a new watcher
|
||||||
type watchRequest struct {
|
type watchRequest struct {
|
||||||
ctx context.Context
|
ctx context.Context
|
||||||
key string
|
key string
|
||||||
end string
|
end string
|
||||||
rev int64
|
rev int64
|
||||||
|
|
||||||
// send created notification event if this field is true
|
// send created notification event if this field is true
|
||||||
createdNotify bool
|
createdNotify bool
|
||||||
// progressNotify is for progress updates
|
// progressNotify is for progress updates
|
||||||
progressNotify bool
|
progressNotify bool
|
||||||
|
// fragmentation should be disabled by default
|
||||||
|
// if true, split watch events when total exceeds
|
||||||
|
// "--max-request-bytes" flag value + 512-byte
|
||||||
|
fragment bool
|
||||||
|
|
||||||
// filters is the list of events to filter out
|
// filters is the list of events to filter out
|
||||||
filters []pb.WatchCreateRequest_FilterType
|
filters []pb.WatchCreateRequest_FilterType
|
||||||
// get the previous key-value pair before the event happens
|
// get the previous key-value pair before the event happens
|
||||||
@@ -170,6 +207,10 @@ type watchRequest struct {
|
|||||||
retc chan chan WatchResponse
|
retc chan chan WatchResponse
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// progressRequest is issued by the subscriber to request watch progress
|
||||||
|
type progressRequest struct {
|
||||||
|
}
|
||||||
|
|
||||||
// watcherStream represents a registered watcher
|
// watcherStream represents a registered watcher
|
||||||
type watcherStream struct {
|
type watcherStream struct {
|
||||||
// initReq is the request that initiated this request
|
// initReq is the request that initiated this request
|
||||||
@@ -227,7 +268,7 @@ func (w *watcher) newWatcherGrpcStream(inctx context.Context) *watchGrpcStream {
|
|||||||
cancel: cancel,
|
cancel: cancel,
|
||||||
substreams: make(map[int64]*watcherStream),
|
substreams: make(map[int64]*watcherStream),
|
||||||
respc: make(chan *pb.WatchResponse),
|
respc: make(chan *pb.WatchResponse),
|
||||||
reqc: make(chan *watchRequest),
|
reqc: make(chan watchStreamRequest),
|
||||||
donec: make(chan struct{}),
|
donec: make(chan struct{}),
|
||||||
errc: make(chan error, 1),
|
errc: make(chan error, 1),
|
||||||
closingc: make(chan *watcherStream),
|
closingc: make(chan *watcherStream),
|
||||||
@@ -256,6 +297,7 @@ func (w *watcher) Watch(ctx context.Context, key string, opts ...OpOption) Watch
|
|||||||
end: string(ow.end),
|
end: string(ow.end),
|
||||||
rev: ow.rev,
|
rev: ow.rev,
|
||||||
progressNotify: ow.progressNotify,
|
progressNotify: ow.progressNotify,
|
||||||
|
fragment: ow.fragment,
|
||||||
filters: filters,
|
filters: filters,
|
||||||
prevKV: ow.prevKV,
|
prevKV: ow.prevKV,
|
||||||
retc: make(chan chan WatchResponse, 1),
|
retc: make(chan chan WatchResponse, 1),
|
||||||
@@ -292,7 +334,7 @@ func (w *watcher) Watch(ctx context.Context, key string, opts ...OpOption) Watch
|
|||||||
case <-wr.ctx.Done():
|
case <-wr.ctx.Done():
|
||||||
case <-donec:
|
case <-donec:
|
||||||
if wgs.closeErr != nil {
|
if wgs.closeErr != nil {
|
||||||
closeCh <- WatchResponse{closeErr: wgs.closeErr}
|
closeCh <- WatchResponse{Canceled: true, closeErr: wgs.closeErr}
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
// retry; may have dropped stream from no ctxs
|
// retry; may have dropped stream from no ctxs
|
||||||
@@ -307,7 +349,7 @@ func (w *watcher) Watch(ctx context.Context, key string, opts ...OpOption) Watch
|
|||||||
case <-ctx.Done():
|
case <-ctx.Done():
|
||||||
case <-donec:
|
case <-donec:
|
||||||
if wgs.closeErr != nil {
|
if wgs.closeErr != nil {
|
||||||
closeCh <- WatchResponse{closeErr: wgs.closeErr}
|
closeCh <- WatchResponse{Canceled: true, closeErr: wgs.closeErr}
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
// retry; may have dropped stream from no ctxs
|
// retry; may have dropped stream from no ctxs
|
||||||
@@ -329,9 +371,50 @@ func (w *watcher) Close() (err error) {
|
|||||||
err = werr
|
err = werr
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
// Consider context.Canceled as a successful close
|
||||||
|
if err == context.Canceled {
|
||||||
|
err = nil
|
||||||
|
}
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// RequestProgress requests a progress notify response be sent in all watch channels.
|
||||||
|
func (w *watcher) RequestProgress(ctx context.Context) (err error) {
|
||||||
|
ctxKey := streamKeyFromCtx(ctx)
|
||||||
|
|
||||||
|
w.mu.Lock()
|
||||||
|
if w.streams == nil {
|
||||||
|
w.mu.Unlock()
|
||||||
|
return fmt.Errorf("no stream found for context")
|
||||||
|
}
|
||||||
|
wgs := w.streams[ctxKey]
|
||||||
|
if wgs == nil {
|
||||||
|
wgs = w.newWatcherGrpcStream(ctx)
|
||||||
|
w.streams[ctxKey] = wgs
|
||||||
|
}
|
||||||
|
donec := wgs.donec
|
||||||
|
reqc := wgs.reqc
|
||||||
|
w.mu.Unlock()
|
||||||
|
|
||||||
|
pr := &progressRequest{}
|
||||||
|
|
||||||
|
select {
|
||||||
|
case reqc <- pr:
|
||||||
|
return nil
|
||||||
|
case <-ctx.Done():
|
||||||
|
if err == nil {
|
||||||
|
return ctx.Err()
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
case <-donec:
|
||||||
|
if wgs.closeErr != nil {
|
||||||
|
return wgs.closeErr
|
||||||
|
}
|
||||||
|
// retry; may have dropped stream from no ctxs
|
||||||
|
return w.RequestProgress(ctx)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func (w *watchGrpcStream) close() (err error) {
|
func (w *watchGrpcStream) close() (err error) {
|
||||||
w.cancel()
|
w.cancel()
|
||||||
<-w.donec
|
<-w.donec
|
||||||
@@ -353,7 +436,9 @@ func (w *watcher) closeStream(wgs *watchGrpcStream) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (w *watchGrpcStream) addSubstream(resp *pb.WatchResponse, ws *watcherStream) {
|
func (w *watchGrpcStream) addSubstream(resp *pb.WatchResponse, ws *watcherStream) {
|
||||||
if resp.WatchId == -1 {
|
// check watch ID for backward compatibility (<= v3.3)
|
||||||
|
if resp.WatchId == -1 || (resp.Canceled && resp.CancelReason != "") {
|
||||||
|
w.closeErr = v3rpc.Error(errors.New(resp.CancelReason))
|
||||||
// failed; no channel
|
// failed; no channel
|
||||||
close(ws.recvc)
|
close(ws.recvc)
|
||||||
return
|
return
|
||||||
@@ -379,7 +464,7 @@ func (w *watchGrpcStream) closeSubstream(ws *watcherStream) {
|
|||||||
}
|
}
|
||||||
// close subscriber's channel
|
// close subscriber's channel
|
||||||
if closeErr := w.closeErr; closeErr != nil && ws.initReq.ctx.Err() == nil {
|
if closeErr := w.closeErr; closeErr != nil && ws.initReq.ctx.Err() == nil {
|
||||||
go w.sendCloseSubstream(ws, &WatchResponse{closeErr: w.closeErr})
|
go w.sendCloseSubstream(ws, &WatchResponse{Canceled: true, closeErr: w.closeErr})
|
||||||
} else if ws.outc != nil {
|
} else if ws.outc != nil {
|
||||||
close(ws.outc)
|
close(ws.outc)
|
||||||
}
|
}
|
||||||
@@ -434,31 +519,48 @@ func (w *watchGrpcStream) run() {
|
|||||||
|
|
||||||
cancelSet := make(map[int64]struct{})
|
cancelSet := make(map[int64]struct{})
|
||||||
|
|
||||||
|
var cur *pb.WatchResponse
|
||||||
for {
|
for {
|
||||||
select {
|
select {
|
||||||
// Watch() requested
|
// Watch() requested
|
||||||
case wreq := <-w.reqc:
|
case req := <-w.reqc:
|
||||||
outc := make(chan WatchResponse, 1)
|
switch wreq := req.(type) {
|
||||||
ws := &watcherStream{
|
case *watchRequest:
|
||||||
initReq: *wreq,
|
outc := make(chan WatchResponse, 1)
|
||||||
id: -1,
|
// TODO: pass custom watch ID?
|
||||||
outc: outc,
|
ws := &watcherStream{
|
||||||
// unbuffered so resumes won't cause repeat events
|
initReq: *wreq,
|
||||||
recvc: make(chan *WatchResponse),
|
id: -1,
|
||||||
|
outc: outc,
|
||||||
|
// unbuffered so resumes won't cause repeat events
|
||||||
|
recvc: make(chan *WatchResponse),
|
||||||
|
}
|
||||||
|
|
||||||
|
ws.donec = make(chan struct{})
|
||||||
|
w.wg.Add(1)
|
||||||
|
go w.serveSubstream(ws, w.resumec)
|
||||||
|
|
||||||
|
// queue up for watcher creation/resume
|
||||||
|
w.resuming = append(w.resuming, ws)
|
||||||
|
if len(w.resuming) == 1 {
|
||||||
|
// head of resume queue, can register a new watcher
|
||||||
|
wc.Send(ws.initReq.toPB())
|
||||||
|
}
|
||||||
|
case *progressRequest:
|
||||||
|
wc.Send(wreq.toPB())
|
||||||
}
|
}
|
||||||
|
|
||||||
ws.donec = make(chan struct{})
|
// new events from the watch client
|
||||||
w.wg.Add(1)
|
|
||||||
go w.serveSubstream(ws, w.resumec)
|
|
||||||
|
|
||||||
// queue up for watcher creation/resume
|
|
||||||
w.resuming = append(w.resuming, ws)
|
|
||||||
if len(w.resuming) == 1 {
|
|
||||||
// head of resume queue, can register a new watcher
|
|
||||||
wc.Send(ws.initReq.toPB())
|
|
||||||
}
|
|
||||||
// New events from the watch client
|
|
||||||
case pbresp := <-w.respc:
|
case pbresp := <-w.respc:
|
||||||
|
if cur == nil || pbresp.Created || pbresp.Canceled {
|
||||||
|
cur = pbresp
|
||||||
|
} else if cur != nil && cur.WatchId == pbresp.WatchId {
|
||||||
|
// merge new events
|
||||||
|
cur.Events = append(cur.Events, pbresp.Events...)
|
||||||
|
// update "Fragment" field; last response with "Fragment" == false
|
||||||
|
cur.Fragment = pbresp.Fragment
|
||||||
|
}
|
||||||
|
|
||||||
switch {
|
switch {
|
||||||
case pbresp.Created:
|
case pbresp.Created:
|
||||||
// response to head of queue creation
|
// response to head of queue creation
|
||||||
@@ -467,9 +569,14 @@ func (w *watchGrpcStream) run() {
|
|||||||
w.dispatchEvent(pbresp)
|
w.dispatchEvent(pbresp)
|
||||||
w.resuming[0] = nil
|
w.resuming[0] = nil
|
||||||
}
|
}
|
||||||
|
|
||||||
if ws := w.nextResume(); ws != nil {
|
if ws := w.nextResume(); ws != nil {
|
||||||
wc.Send(ws.initReq.toPB())
|
wc.Send(ws.initReq.toPB())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// reset for next iteration
|
||||||
|
cur = nil
|
||||||
|
|
||||||
case pbresp.Canceled && pbresp.CompactRevision == 0:
|
case pbresp.Canceled && pbresp.CompactRevision == 0:
|
||||||
delete(cancelSet, pbresp.WatchId)
|
delete(cancelSet, pbresp.WatchId)
|
||||||
if ws, ok := w.substreams[pbresp.WatchId]; ok {
|
if ws, ok := w.substreams[pbresp.WatchId]; ok {
|
||||||
@@ -477,15 +584,31 @@ func (w *watchGrpcStream) run() {
|
|||||||
close(ws.recvc)
|
close(ws.recvc)
|
||||||
closing[ws] = struct{}{}
|
closing[ws] = struct{}{}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// reset for next iteration
|
||||||
|
cur = nil
|
||||||
|
|
||||||
|
case cur.Fragment:
|
||||||
|
// watch response events are still fragmented
|
||||||
|
// continue to fetch next fragmented event arrival
|
||||||
|
continue
|
||||||
|
|
||||||
default:
|
default:
|
||||||
// dispatch to appropriate watch stream
|
// dispatch to appropriate watch stream
|
||||||
if ok := w.dispatchEvent(pbresp); ok {
|
ok := w.dispatchEvent(cur)
|
||||||
|
|
||||||
|
// reset for next iteration
|
||||||
|
cur = nil
|
||||||
|
|
||||||
|
if ok {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
|
|
||||||
// watch response on unexpected watch id; cancel id
|
// watch response on unexpected watch id; cancel id
|
||||||
if _, ok := cancelSet[pbresp.WatchId]; ok {
|
if _, ok := cancelSet[pbresp.WatchId]; ok {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
|
|
||||||
cancelSet[pbresp.WatchId] = struct{}{}
|
cancelSet[pbresp.WatchId] = struct{}{}
|
||||||
cr := &pb.WatchRequest_CancelRequest{
|
cr := &pb.WatchRequest_CancelRequest{
|
||||||
CancelRequest: &pb.WatchCancelRequest{
|
CancelRequest: &pb.WatchCancelRequest{
|
||||||
@@ -495,6 +618,7 @@ func (w *watchGrpcStream) run() {
|
|||||||
req := &pb.WatchRequest{RequestUnion: cr}
|
req := &pb.WatchRequest{RequestUnion: cr}
|
||||||
wc.Send(req)
|
wc.Send(req)
|
||||||
}
|
}
|
||||||
|
|
||||||
// watch client failed on Recv; spawn another if possible
|
// watch client failed on Recv; spawn another if possible
|
||||||
case err := <-w.errc:
|
case err := <-w.errc:
|
||||||
if isHaltErr(w.ctx, err) || toErr(w.ctx, err) == v3rpc.ErrNoLeader {
|
if isHaltErr(w.ctx, err) || toErr(w.ctx, err) == v3rpc.ErrNoLeader {
|
||||||
@@ -508,13 +632,15 @@ func (w *watchGrpcStream) run() {
|
|||||||
wc.Send(ws.initReq.toPB())
|
wc.Send(ws.initReq.toPB())
|
||||||
}
|
}
|
||||||
cancelSet = make(map[int64]struct{})
|
cancelSet = make(map[int64]struct{})
|
||||||
|
|
||||||
case <-w.ctx.Done():
|
case <-w.ctx.Done():
|
||||||
return
|
return
|
||||||
|
|
||||||
case ws := <-w.closingc:
|
case ws := <-w.closingc:
|
||||||
w.closeSubstream(ws)
|
w.closeSubstream(ws)
|
||||||
delete(closing, ws)
|
delete(closing, ws)
|
||||||
|
// no more watchers on this stream, shutdown
|
||||||
if len(w.substreams)+len(w.resuming) == 0 {
|
if len(w.substreams)+len(w.resuming) == 0 {
|
||||||
// no more watchers on this stream, shutdown
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -539,6 +665,7 @@ func (w *watchGrpcStream) dispatchEvent(pbresp *pb.WatchResponse) bool {
|
|||||||
for i, ev := range pbresp.Events {
|
for i, ev := range pbresp.Events {
|
||||||
events[i] = (*Event)(ev)
|
events[i] = (*Event)(ev)
|
||||||
}
|
}
|
||||||
|
// TODO: return watch ID?
|
||||||
wr := &WatchResponse{
|
wr := &WatchResponse{
|
||||||
Header: *pbresp.Header,
|
Header: *pbresp.Header,
|
||||||
Events: events,
|
Events: events,
|
||||||
@@ -547,7 +674,31 @@ func (w *watchGrpcStream) dispatchEvent(pbresp *pb.WatchResponse) bool {
|
|||||||
Canceled: pbresp.Canceled,
|
Canceled: pbresp.Canceled,
|
||||||
cancelReason: pbresp.CancelReason,
|
cancelReason: pbresp.CancelReason,
|
||||||
}
|
}
|
||||||
ws, ok := w.substreams[pbresp.WatchId]
|
|
||||||
|
// watch IDs are zero indexed, so request notify watch responses are assigned a watch ID of -1 to
|
||||||
|
// indicate they should be broadcast.
|
||||||
|
if wr.IsProgressNotify() && pbresp.WatchId == -1 {
|
||||||
|
return w.broadcastResponse(wr)
|
||||||
|
}
|
||||||
|
|
||||||
|
return w.unicastResponse(wr, pbresp.WatchId)
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
// broadcastResponse sends a watch response to all watch substreams.
|
||||||
|
func (w *watchGrpcStream) broadcastResponse(wr *WatchResponse) bool {
|
||||||
|
for _, ws := range w.substreams {
|
||||||
|
select {
|
||||||
|
case ws.recvc <- wr:
|
||||||
|
case <-ws.donec:
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// unicastResponse sends a watch response to a specific watch substream.
|
||||||
|
func (w *watchGrpcStream) unicastResponse(wr *WatchResponse, watchId int64) bool {
|
||||||
|
ws, ok := w.substreams[watchId]
|
||||||
if !ok {
|
if !ok {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
@@ -815,11 +966,19 @@ func (wr *watchRequest) toPB() *pb.WatchRequest {
|
|||||||
ProgressNotify: wr.progressNotify,
|
ProgressNotify: wr.progressNotify,
|
||||||
Filters: wr.filters,
|
Filters: wr.filters,
|
||||||
PrevKv: wr.prevKV,
|
PrevKv: wr.prevKV,
|
||||||
|
Fragment: wr.fragment,
|
||||||
}
|
}
|
||||||
cr := &pb.WatchRequest_CreateRequest{CreateRequest: req}
|
cr := &pb.WatchRequest_CreateRequest{CreateRequest: req}
|
||||||
return &pb.WatchRequest{RequestUnion: cr}
|
return &pb.WatchRequest{RequestUnion: cr}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// toPB converts an internal progress request structure to its protobuf WatchRequest structure.
|
||||||
|
func (pr *progressRequest) toPB() *pb.WatchRequest {
|
||||||
|
req := &pb.WatchProgressRequest{}
|
||||||
|
cr := &pb.WatchRequest_ProgressRequest{ProgressRequest: req}
|
||||||
|
return &pb.WatchRequest{RequestUnion: cr}
|
||||||
|
}
|
||||||
|
|
||||||
func streamKeyFromCtx(ctx context.Context) string {
|
func streamKeyFromCtx(ctx context.Context) string {
|
||||||
if md, ok := metadata.FromOutgoingContext(ctx); ok {
|
if md, ok := metadata.FromOutgoingContext(ctx); ok {
|
||||||
return fmt.Sprintf("%+v", md)
|
return fmt.Sprintf("%+v", md)
|
||||||
|
|||||||
2
vendor/github.com/coreos/etcd/embed/BUILD
generated
vendored
@@ -38,7 +38,6 @@ go_library(
|
|||||||
"//vendor/github.com/coreos/etcd/rafthttp:go_default_library",
|
"//vendor/github.com/coreos/etcd/rafthttp:go_default_library",
|
||||||
"//vendor/github.com/coreos/etcd/wal:go_default_library",
|
"//vendor/github.com/coreos/etcd/wal:go_default_library",
|
||||||
"//vendor/github.com/coreos/pkg/capnslog:go_default_library",
|
"//vendor/github.com/coreos/pkg/capnslog:go_default_library",
|
||||||
"//vendor/github.com/ghodss/yaml:go_default_library",
|
|
||||||
"//vendor/github.com/grpc-ecosystem/go-grpc-prometheus:go_default_library",
|
"//vendor/github.com/grpc-ecosystem/go-grpc-prometheus:go_default_library",
|
||||||
"//vendor/github.com/grpc-ecosystem/grpc-gateway/runtime:go_default_library",
|
"//vendor/github.com/grpc-ecosystem/grpc-gateway/runtime:go_default_library",
|
||||||
"//vendor/github.com/soheilhy/cmux:go_default_library",
|
"//vendor/github.com/soheilhy/cmux:go_default_library",
|
||||||
@@ -48,6 +47,7 @@ go_library(
|
|||||||
"//vendor/google.golang.org/grpc/credentials:go_default_library",
|
"//vendor/google.golang.org/grpc/credentials:go_default_library",
|
||||||
"//vendor/google.golang.org/grpc/grpclog:go_default_library",
|
"//vendor/google.golang.org/grpc/grpclog:go_default_library",
|
||||||
"//vendor/google.golang.org/grpc/keepalive:go_default_library",
|
"//vendor/google.golang.org/grpc/keepalive:go_default_library",
|
||||||
|
"//vendor/sigs.k8s.io/yaml:go_default_library",
|
||||||
],
|
],
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|||||||
2
vendor/github.com/coreos/etcd/embed/config.go
generated
vendored
@@ -36,9 +36,9 @@ import (
|
|||||||
"github.com/coreos/etcd/pkg/types"
|
"github.com/coreos/etcd/pkg/types"
|
||||||
|
|
||||||
"github.com/coreos/pkg/capnslog"
|
"github.com/coreos/pkg/capnslog"
|
||||||
"github.com/ghodss/yaml"
|
|
||||||
"google.golang.org/grpc"
|
"google.golang.org/grpc"
|
||||||
"google.golang.org/grpc/grpclog"
|
"google.golang.org/grpc/grpclog"
|
||||||
|
"sigs.k8s.io/yaml"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
|
|||||||
4
vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/BUILD
generated
vendored
@@ -11,8 +11,10 @@ go_library(
|
|||||||
"//vendor/github.com/coreos/etcd/mvcc/mvccpb:go_default_library",
|
"//vendor/github.com/coreos/etcd/mvcc/mvccpb:go_default_library",
|
||||||
"//vendor/github.com/gogo/protobuf/gogoproto:go_default_library",
|
"//vendor/github.com/gogo/protobuf/gogoproto:go_default_library",
|
||||||
"//vendor/github.com/golang/protobuf/proto:go_default_library",
|
"//vendor/github.com/golang/protobuf/proto:go_default_library",
|
||||||
"//vendor/golang.org/x/net/context:go_default_library",
|
"//vendor/google.golang.org/genproto/googleapis/api/annotations:go_default_library",
|
||||||
"//vendor/google.golang.org/grpc:go_default_library",
|
"//vendor/google.golang.org/grpc:go_default_library",
|
||||||
|
"//vendor/google.golang.org/grpc/codes:go_default_library",
|
||||||
|
"//vendor/google.golang.org/grpc/status:go_default_library",
|
||||||
],
|
],
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|||||||
1107
vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/v3election.pb.go
generated
vendored
File diff suppressed because it is too large
4
vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/BUILD
generated
vendored
@@ -10,8 +10,10 @@ go_library(
|
|||||||
"//vendor/github.com/coreos/etcd/etcdserver/etcdserverpb:go_default_library",
|
"//vendor/github.com/coreos/etcd/etcdserver/etcdserverpb:go_default_library",
|
||||||
"//vendor/github.com/gogo/protobuf/gogoproto:go_default_library",
|
"//vendor/github.com/gogo/protobuf/gogoproto:go_default_library",
|
||||||
"//vendor/github.com/golang/protobuf/proto:go_default_library",
|
"//vendor/github.com/golang/protobuf/proto:go_default_library",
|
||||||
"//vendor/golang.org/x/net/context:go_default_library",
|
"//vendor/google.golang.org/genproto/googleapis/api/annotations:go_default_library",
|
||||||
"//vendor/google.golang.org/grpc:go_default_library",
|
"//vendor/google.golang.org/grpc:go_default_library",
|
||||||
|
"//vendor/google.golang.org/grpc/codes:go_default_library",
|
||||||
|
"//vendor/google.golang.org/grpc/status:go_default_library",
|
||||||
],
|
],
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|||||||
504
vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/v3lock.pb.go
generated
vendored
@@ -1,36 +1,22 @@
|
|||||||
// Code generated by protoc-gen-gogo. DO NOT EDIT.
|
// Code generated by protoc-gen-gogo. DO NOT EDIT.
|
||||||
// source: v3lock.proto
|
// source: v3lock.proto
|
||||||
|
|
||||||
/*
|
|
||||||
Package v3lockpb is a generated protocol buffer package.
|
|
||||||
|
|
||||||
It is generated from these files:
|
|
||||||
v3lock.proto
|
|
||||||
|
|
||||||
It has these top-level messages:
|
|
||||||
LockRequest
|
|
||||||
LockResponse
|
|
||||||
UnlockRequest
|
|
||||||
UnlockResponse
|
|
||||||
*/
|
|
||||||
package v3lockpb
|
package v3lockpb
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
context "context"
|
||||||
|
fmt "fmt"
|
||||||
proto "github.com/golang/protobuf/proto"
|
io "io"
|
||||||
|
|
||||||
math "math"
|
math "math"
|
||||||
|
math_bits "math/bits"
|
||||||
_ "github.com/gogo/protobuf/gogoproto"
|
|
||||||
|
|
||||||
etcdserverpb "github.com/coreos/etcd/etcdserver/etcdserverpb"
|
etcdserverpb "github.com/coreos/etcd/etcdserver/etcdserverpb"
|
||||||
|
_ "github.com/gogo/protobuf/gogoproto"
|
||||||
context "golang.org/x/net/context"
|
proto "github.com/golang/protobuf/proto"
|
||||||
|
_ "google.golang.org/genproto/googleapis/api/annotations"
|
||||||
grpc "google.golang.org/grpc"
|
grpc "google.golang.org/grpc"
|
||||||
|
codes "google.golang.org/grpc/codes"
|
||||||
io "io"
|
status "google.golang.org/grpc/status"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Reference imports to suppress errors if they are not otherwise used.
|
// Reference imports to suppress errors if they are not otherwise used.
|
||||||
@@ -52,13 +38,44 @@ type LockRequest struct {
|
|||||||
// the lock is automatically released. Calls to Lock with the same lease will
|
// the lock is automatically released. Calls to Lock with the same lease will
|
||||||
// be treated as a single acquisition; locking twice with the same lease is a
|
// be treated as a single acquisition; locking twice with the same lease is a
|
||||||
// no-op.
|
// no-op.
|
||||||
Lease int64 `protobuf:"varint,2,opt,name=lease,proto3" json:"lease,omitempty"`
|
Lease int64 `protobuf:"varint,2,opt,name=lease,proto3" json:"lease,omitempty"`
|
||||||
|
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||||
|
XXX_unrecognized []byte `json:"-"`
|
||||||
|
XXX_sizecache int32 `json:"-"`
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *LockRequest) Reset() { *m = LockRequest{} }
|
func (m *LockRequest) Reset() { *m = LockRequest{} }
|
||||||
func (m *LockRequest) String() string { return proto.CompactTextString(m) }
|
func (m *LockRequest) String() string { return proto.CompactTextString(m) }
|
||||||
func (*LockRequest) ProtoMessage() {}
|
func (*LockRequest) ProtoMessage() {}
|
||||||
func (*LockRequest) Descriptor() ([]byte, []int) { return fileDescriptorV3Lock, []int{0} }
|
func (*LockRequest) Descriptor() ([]byte, []int) {
|
||||||
|
return fileDescriptor_52389b3e2f253201, []int{0}
|
||||||
|
}
|
||||||
|
func (m *LockRequest) XXX_Unmarshal(b []byte) error {
|
||||||
|
return m.Unmarshal(b)
|
||||||
|
}
|
||||||
|
func (m *LockRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||||
|
if deterministic {
|
||||||
|
return xxx_messageInfo_LockRequest.Marshal(b, m, deterministic)
|
||||||
|
} else {
|
||||||
|
b = b[:cap(b)]
|
||||||
|
n, err := m.MarshalToSizedBuffer(b)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return b[:n], nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
func (m *LockRequest) XXX_Merge(src proto.Message) {
|
||||||
|
xxx_messageInfo_LockRequest.Merge(m, src)
|
||||||
|
}
|
||||||
|
func (m *LockRequest) XXX_Size() int {
|
||||||
|
return m.Size()
|
||||||
|
}
|
||||||
|
func (m *LockRequest) XXX_DiscardUnknown() {
|
||||||
|
xxx_messageInfo_LockRequest.DiscardUnknown(m)
|
||||||
|
}
|
||||||
|
|
||||||
|
var xxx_messageInfo_LockRequest proto.InternalMessageInfo
|
||||||
|
|
||||||
func (m *LockRequest) GetName() []byte {
|
func (m *LockRequest) GetName() []byte {
|
||||||
if m != nil {
|
if m != nil {
|
||||||
@@ -75,17 +92,48 @@ func (m *LockRequest) GetLease() int64 {
|
|||||||
}
|
}
|
||||||
|
|
||||||
type LockResponse struct {
|
type LockResponse struct {
|
||||||
Header *etcdserverpb.ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
|
Header *etcdserverpb.ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
|
||||||
// key is a key that will exist on etcd for the duration that the Lock caller
|
// key is a key that will exist on etcd for the duration that the Lock caller
|
||||||
// owns the lock. Users should not modify this key or the lock may exhibit
|
// owns the lock. Users should not modify this key or the lock may exhibit
|
||||||
// undefined behavior.
|
// undefined behavior.
|
||||||
Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
|
Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
|
||||||
|
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||||
|
XXX_unrecognized []byte `json:"-"`
|
||||||
|
XXX_sizecache int32 `json:"-"`
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *LockResponse) Reset() { *m = LockResponse{} }
|
func (m *LockResponse) Reset() { *m = LockResponse{} }
|
||||||
func (m *LockResponse) String() string { return proto.CompactTextString(m) }
|
func (m *LockResponse) String() string { return proto.CompactTextString(m) }
|
||||||
func (*LockResponse) ProtoMessage() {}
|
func (*LockResponse) ProtoMessage() {}
|
||||||
func (*LockResponse) Descriptor() ([]byte, []int) { return fileDescriptorV3Lock, []int{1} }
|
func (*LockResponse) Descriptor() ([]byte, []int) {
|
||||||
|
return fileDescriptor_52389b3e2f253201, []int{1}
|
||||||
|
}
|
||||||
|
func (m *LockResponse) XXX_Unmarshal(b []byte) error {
|
||||||
|
return m.Unmarshal(b)
|
||||||
|
}
|
||||||
|
func (m *LockResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||||
|
if deterministic {
|
||||||
|
return xxx_messageInfo_LockResponse.Marshal(b, m, deterministic)
|
||||||
|
} else {
|
||||||
|
b = b[:cap(b)]
|
||||||
|
n, err := m.MarshalToSizedBuffer(b)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return b[:n], nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
func (m *LockResponse) XXX_Merge(src proto.Message) {
|
||||||
|
xxx_messageInfo_LockResponse.Merge(m, src)
|
||||||
|
}
|
||||||
|
func (m *LockResponse) XXX_Size() int {
|
||||||
|
return m.Size()
|
||||||
|
}
|
||||||
|
func (m *LockResponse) XXX_DiscardUnknown() {
|
||||||
|
xxx_messageInfo_LockResponse.DiscardUnknown(m)
|
||||||
|
}
|
||||||
|
|
||||||
|
var xxx_messageInfo_LockResponse proto.InternalMessageInfo
|
||||||
|
|
||||||
func (m *LockResponse) GetHeader() *etcdserverpb.ResponseHeader {
|
func (m *LockResponse) GetHeader() *etcdserverpb.ResponseHeader {
|
||||||
if m != nil {
|
if m != nil {
|
||||||
@@ -103,13 +151,44 @@ func (m *LockResponse) GetKey() []byte {
|
|||||||
|
|
||||||
type UnlockRequest struct {
|
type UnlockRequest struct {
|
||||||
// key is the lock ownership key granted by Lock.
|
// key is the lock ownership key granted by Lock.
|
||||||
Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
|
Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
|
||||||
|
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||||
|
XXX_unrecognized []byte `json:"-"`
|
||||||
|
XXX_sizecache int32 `json:"-"`
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *UnlockRequest) Reset() { *m = UnlockRequest{} }
|
func (m *UnlockRequest) Reset() { *m = UnlockRequest{} }
|
||||||
func (m *UnlockRequest) String() string { return proto.CompactTextString(m) }
|
func (m *UnlockRequest) String() string { return proto.CompactTextString(m) }
|
||||||
func (*UnlockRequest) ProtoMessage() {}
|
func (*UnlockRequest) ProtoMessage() {}
|
||||||
func (*UnlockRequest) Descriptor() ([]byte, []int) { return fileDescriptorV3Lock, []int{2} }
|
func (*UnlockRequest) Descriptor() ([]byte, []int) {
|
||||||
|
return fileDescriptor_52389b3e2f253201, []int{2}
|
||||||
|
}
|
||||||
|
func (m *UnlockRequest) XXX_Unmarshal(b []byte) error {
|
||||||
|
return m.Unmarshal(b)
|
||||||
|
}
|
||||||
|
func (m *UnlockRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||||
|
if deterministic {
|
||||||
|
return xxx_messageInfo_UnlockRequest.Marshal(b, m, deterministic)
|
||||||
|
} else {
|
||||||
|
b = b[:cap(b)]
|
||||||
|
n, err := m.MarshalToSizedBuffer(b)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return b[:n], nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
func (m *UnlockRequest) XXX_Merge(src proto.Message) {
|
||||||
|
xxx_messageInfo_UnlockRequest.Merge(m, src)
|
||||||
|
}
|
||||||
|
func (m *UnlockRequest) XXX_Size() int {
|
||||||
|
return m.Size()
|
||||||
|
}
|
||||||
|
func (m *UnlockRequest) XXX_DiscardUnknown() {
|
||||||
|
xxx_messageInfo_UnlockRequest.DiscardUnknown(m)
|
||||||
|
}
|
||||||
|
|
||||||
|
var xxx_messageInfo_UnlockRequest proto.InternalMessageInfo
|
||||||
|
|
||||||
func (m *UnlockRequest) GetKey() []byte {
|
func (m *UnlockRequest) GetKey() []byte {
|
||||||
if m != nil {
|
if m != nil {
|
||||||
@@ -119,13 +198,44 @@ func (m *UnlockRequest) GetKey() []byte {
|
|||||||
}
|
}
|
||||||
|
|
||||||
type UnlockResponse struct {
|
type UnlockResponse struct {
|
||||||
Header *etcdserverpb.ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
|
Header *etcdserverpb.ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
|
||||||
|
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||||
|
XXX_unrecognized []byte `json:"-"`
|
||||||
|
XXX_sizecache int32 `json:"-"`
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *UnlockResponse) Reset() { *m = UnlockResponse{} }
|
func (m *UnlockResponse) Reset() { *m = UnlockResponse{} }
|
||||||
func (m *UnlockResponse) String() string { return proto.CompactTextString(m) }
|
func (m *UnlockResponse) String() string { return proto.CompactTextString(m) }
|
||||||
func (*UnlockResponse) ProtoMessage() {}
|
func (*UnlockResponse) ProtoMessage() {}
|
||||||
func (*UnlockResponse) Descriptor() ([]byte, []int) { return fileDescriptorV3Lock, []int{3} }
|
func (*UnlockResponse) Descriptor() ([]byte, []int) {
|
||||||
|
return fileDescriptor_52389b3e2f253201, []int{3}
|
||||||
|
}
|
||||||
|
func (m *UnlockResponse) XXX_Unmarshal(b []byte) error {
|
||||||
|
return m.Unmarshal(b)
|
||||||
|
}
|
||||||
|
func (m *UnlockResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||||
|
if deterministic {
|
||||||
|
return xxx_messageInfo_UnlockResponse.Marshal(b, m, deterministic)
|
||||||
|
} else {
|
||||||
|
b = b[:cap(b)]
|
||||||
|
n, err := m.MarshalToSizedBuffer(b)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return b[:n], nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
func (m *UnlockResponse) XXX_Merge(src proto.Message) {
|
||||||
|
xxx_messageInfo_UnlockResponse.Merge(m, src)
|
||||||
|
}
|
||||||
|
func (m *UnlockResponse) XXX_Size() int {
|
||||||
|
return m.Size()
|
||||||
|
}
|
||||||
|
func (m *UnlockResponse) XXX_DiscardUnknown() {
|
||||||
|
xxx_messageInfo_UnlockResponse.DiscardUnknown(m)
|
||||||
|
}
|
||||||
|
|
||||||
|
var xxx_messageInfo_UnlockResponse proto.InternalMessageInfo
|
||||||
|
|
||||||
func (m *UnlockResponse) GetHeader() *etcdserverpb.ResponseHeader {
|
func (m *UnlockResponse) GetHeader() *etcdserverpb.ResponseHeader {
|
||||||
if m != nil {
|
if m != nil {
|
||||||
@@ -141,6 +251,33 @@ func init() {
|
|||||||
proto.RegisterType((*UnlockResponse)(nil), "v3lockpb.UnlockResponse")
|
proto.RegisterType((*UnlockResponse)(nil), "v3lockpb.UnlockResponse")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func init() { proto.RegisterFile("v3lock.proto", fileDescriptor_52389b3e2f253201) }
|
||||||
|
|
||||||
|
var fileDescriptor_52389b3e2f253201 = []byte{
|
||||||
|
// 335 bytes of a gzipped FileDescriptorProto
|
||||||
|
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0x33, 0xce, 0xc9,
|
||||||
|
0x4f, 0xce, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x80, 0xf0, 0x0a, 0x92, 0xa4, 0x44,
|
||||||
|
0xd2, 0xf3, 0xd3, 0xf3, 0xc1, 0x82, 0xfa, 0x20, 0x16, 0x44, 0x5e, 0x4a, 0x2d, 0xb5, 0x24, 0x39,
|
||||||
|
0x45, 0x1f, 0x44, 0x14, 0xa7, 0x16, 0x95, 0xa5, 0x16, 0x21, 0x31, 0x0b, 0x92, 0xf4, 0x8b, 0x0a,
|
||||||
|
0x92, 0xa1, 0xea, 0x64, 0xd2, 0xf3, 0xf3, 0xd3, 0x73, 0x52, 0xf5, 0x13, 0x0b, 0x32, 0xf5, 0x13,
|
||||||
|
0xf3, 0xf2, 0xf2, 0x4b, 0x12, 0x4b, 0x32, 0xf3, 0xf3, 0x8a, 0x21, 0xb2, 0x4a, 0xe6, 0x5c, 0xdc,
|
||||||
|
0x3e, 0xf9, 0xc9, 0xd9, 0x41, 0xa9, 0x85, 0xa5, 0xa9, 0xc5, 0x25, 0x42, 0x42, 0x5c, 0x2c, 0x79,
|
||||||
|
0x89, 0xb9, 0xa9, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0x3c, 0x41, 0x60, 0xb6, 0x90, 0x08, 0x17, 0x6b,
|
||||||
|
0x4e, 0x6a, 0x62, 0x71, 0xaa, 0x04, 0x93, 0x02, 0xa3, 0x06, 0x73, 0x10, 0x84, 0xa3, 0x14, 0xc6,
|
||||||
|
0xc5, 0x03, 0xd1, 0x58, 0x5c, 0x90, 0x9f, 0x57, 0x9c, 0x2a, 0x64, 0xc2, 0xc5, 0x96, 0x91, 0x9a,
|
||||||
|
0x98, 0x92, 0x5a, 0x04, 0xd6, 0xcb, 0x6d, 0x24, 0xa3, 0x87, 0xec, 0x1e, 0x3d, 0x98, 0x3a, 0x0f,
|
||||||
|
0xb0, 0x9a, 0x20, 0xa8, 0x5a, 0x21, 0x01, 0x2e, 0xe6, 0xec, 0xd4, 0x4a, 0xb0, 0xc9, 0x3c, 0x41,
|
||||||
|
0x20, 0xa6, 0x92, 0x22, 0x17, 0x6f, 0x68, 0x5e, 0x0e, 0x92, 0x93, 0xa0, 0x4a, 0x18, 0x11, 0x4a,
|
||||||
|
0xdc, 0xb8, 0xf8, 0x60, 0x4a, 0x28, 0xb1, 0xdc, 0x68, 0x07, 0x23, 0x17, 0x0b, 0xc8, 0x0f, 0x42,
|
||||||
|
0xc1, 0x50, 0x5a, 0x54, 0x0f, 0x16, 0xe6, 0x7a, 0x48, 0x81, 0x22, 0x25, 0x86, 0x2e, 0x0c, 0x31,
|
||||||
|
0x4d, 0x49, 0xa6, 0xe9, 0xf2, 0x93, 0xc9, 0x4c, 0x62, 0x4a, 0x82, 0xfa, 0x65, 0xc6, 0x49, 0xa9,
|
||||||
|
0x25, 0x89, 0xfa, 0x20, 0x45, 0x60, 0xc2, 0x8a, 0x51, 0x4b, 0x28, 0x9a, 0x8b, 0x0d, 0xe2, 0x4a,
|
||||||
|
0x21, 0x71, 0x84, 0x7e, 0x14, 0xaf, 0x49, 0x49, 0x60, 0x4a, 0x40, 0x8d, 0x96, 0x03, 0x1b, 0x2d,
|
||||||
|
0xa1, 0x24, 0x8c, 0x62, 0x74, 0x69, 0x1e, 0xd4, 0x70, 0x27, 0x81, 0x13, 0x8f, 0xe4, 0x18, 0x2f,
|
||||||
|
0x3c, 0x92, 0x63, 0x7c, 0xf0, 0x48, 0x8e, 0x71, 0xc6, 0x63, 0x39, 0x86, 0x24, 0x36, 0x70, 0x7c,
|
||||||
|
0x1a, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x10, 0x82, 0x89, 0xf0, 0x45, 0x02, 0x00, 0x00,
|
||||||
|
}
|
||||||
|
|
||||||
// Reference imports to suppress errors if they are not otherwise used.
|
// Reference imports to suppress errors if they are not otherwise used.
|
||||||
var _ context.Context
|
var _ context.Context
|
||||||
var _ grpc.ClientConn
|
var _ grpc.ClientConn
|
||||||
@@ -149,8 +286,9 @@ var _ grpc.ClientConn
|
|||||||
// is compatible with the grpc package it is being compiled against.
|
// is compatible with the grpc package it is being compiled against.
|
||||||
const _ = grpc.SupportPackageIsVersion4
|
const _ = grpc.SupportPackageIsVersion4
|
||||||
|
|
||||||
// Client API for Lock service
|
// LockClient is the client API for Lock service.
|
||||||
|
//
|
||||||
|
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
|
||||||
type LockClient interface {
|
type LockClient interface {
|
||||||
// Lock acquires a distributed shared lock on a given named lock.
|
// Lock acquires a distributed shared lock on a given named lock.
|
||||||
// On success, it will return a unique key that exists so long as the
|
// On success, it will return a unique key that exists so long as the
|
||||||
@@ -175,7 +313,7 @@ func NewLockClient(cc *grpc.ClientConn) LockClient {
|
|||||||
|
|
||||||
func (c *lockClient) Lock(ctx context.Context, in *LockRequest, opts ...grpc.CallOption) (*LockResponse, error) {
|
func (c *lockClient) Lock(ctx context.Context, in *LockRequest, opts ...grpc.CallOption) (*LockResponse, error) {
|
||||||
out := new(LockResponse)
|
out := new(LockResponse)
|
||||||
err := grpc.Invoke(ctx, "/v3lockpb.Lock/Lock", in, out, c.cc, opts...)
|
err := c.cc.Invoke(ctx, "/v3lockpb.Lock/Lock", in, out, opts...)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -184,15 +322,14 @@ func (c *lockClient) Lock(ctx context.Context, in *LockRequest, opts ...grpc.Cal
|
|||||||
|
|
||||||
func (c *lockClient) Unlock(ctx context.Context, in *UnlockRequest, opts ...grpc.CallOption) (*UnlockResponse, error) {
|
func (c *lockClient) Unlock(ctx context.Context, in *UnlockRequest, opts ...grpc.CallOption) (*UnlockResponse, error) {
|
||||||
out := new(UnlockResponse)
|
out := new(UnlockResponse)
|
||||||
err := grpc.Invoke(ctx, "/v3lockpb.Lock/Unlock", in, out, c.cc, opts...)
|
err := c.cc.Invoke(ctx, "/v3lockpb.Lock/Unlock", in, out, opts...)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
return out, nil
|
return out, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Server API for Lock service
|
// LockServer is the server API for Lock service.
|
||||||
|
|
||||||
type LockServer interface {
|
type LockServer interface {
|
||||||
// Lock acquires a distributed shared lock on a given named lock.
|
// Lock acquires a distributed shared lock on a given named lock.
|
||||||
// On success, it will return a unique key that exists so long as the
|
// On success, it will return a unique key that exists so long as the
|
||||||
@@ -207,6 +344,17 @@ type LockServer interface {
|
|||||||
Unlock(context.Context, *UnlockRequest) (*UnlockResponse, error)
|
Unlock(context.Context, *UnlockRequest) (*UnlockResponse, error)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// UnimplementedLockServer can be embedded to have forward compatible implementations.
|
||||||
|
type UnimplementedLockServer struct {
|
||||||
|
}
|
||||||
|
|
||||||
|
func (*UnimplementedLockServer) Lock(ctx context.Context, req *LockRequest) (*LockResponse, error) {
|
||||||
|
return nil, status.Errorf(codes.Unimplemented, "method Lock not implemented")
|
||||||
|
}
|
||||||
|
func (*UnimplementedLockServer) Unlock(ctx context.Context, req *UnlockRequest) (*UnlockResponse, error) {
|
||||||
|
return nil, status.Errorf(codes.Unimplemented, "method Unlock not implemented")
|
||||||
|
}
|
||||||
|
|
||||||
func RegisterLockServer(s *grpc.Server, srv LockServer) {
|
func RegisterLockServer(s *grpc.Server, srv LockServer) {
|
||||||
s.RegisterService(&_Lock_serviceDesc, srv)
|
s.RegisterService(&_Lock_serviceDesc, srv)
|
||||||
}
|
}
|
||||||
@@ -267,7 +415,7 @@ var _Lock_serviceDesc = grpc.ServiceDesc{
|
|||||||
func (m *LockRequest) Marshal() (dAtA []byte, err error) {
|
func (m *LockRequest) Marshal() (dAtA []byte, err error) {
|
||||||
size := m.Size()
|
size := m.Size()
|
||||||
dAtA = make([]byte, size)
|
dAtA = make([]byte, size)
|
||||||
n, err := m.MarshalTo(dAtA)
|
n, err := m.MarshalToSizedBuffer(dAtA[:size])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -275,28 +423,38 @@ func (m *LockRequest) Marshal() (dAtA []byte, err error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (m *LockRequest) MarshalTo(dAtA []byte) (int, error) {
|
func (m *LockRequest) MarshalTo(dAtA []byte) (int, error) {
|
||||||
var i int
|
size := m.Size()
|
||||||
|
return m.MarshalToSizedBuffer(dAtA[:size])
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *LockRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
||||||
|
i := len(dAtA)
|
||||||
_ = i
|
_ = i
|
||||||
var l int
|
var l int
|
||||||
_ = l
|
_ = l
|
||||||
if len(m.Name) > 0 {
|
if m.XXX_unrecognized != nil {
|
||||||
dAtA[i] = 0xa
|
i -= len(m.XXX_unrecognized)
|
||||||
i++
|
copy(dAtA[i:], m.XXX_unrecognized)
|
||||||
i = encodeVarintV3Lock(dAtA, i, uint64(len(m.Name)))
|
|
||||||
i += copy(dAtA[i:], m.Name)
|
|
||||||
}
|
}
|
||||||
if m.Lease != 0 {
|
if m.Lease != 0 {
|
||||||
dAtA[i] = 0x10
|
|
||||||
i++
|
|
||||||
i = encodeVarintV3Lock(dAtA, i, uint64(m.Lease))
|
i = encodeVarintV3Lock(dAtA, i, uint64(m.Lease))
|
||||||
|
i--
|
||||||
|
dAtA[i] = 0x10
|
||||||
}
|
}
|
||||||
return i, nil
|
if len(m.Name) > 0 {
|
||||||
|
i -= len(m.Name)
|
||||||
|
copy(dAtA[i:], m.Name)
|
||||||
|
i = encodeVarintV3Lock(dAtA, i, uint64(len(m.Name)))
|
||||||
|
i--
|
||||||
|
dAtA[i] = 0xa
|
||||||
|
}
|
||||||
|
return len(dAtA) - i, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *LockResponse) Marshal() (dAtA []byte, err error) {
|
func (m *LockResponse) Marshal() (dAtA []byte, err error) {
|
||||||
size := m.Size()
|
size := m.Size()
|
||||||
dAtA = make([]byte, size)
|
dAtA = make([]byte, size)
|
||||||
n, err := m.MarshalTo(dAtA)
|
n, err := m.MarshalToSizedBuffer(dAtA[:size])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -304,33 +462,45 @@ func (m *LockResponse) Marshal() (dAtA []byte, err error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (m *LockResponse) MarshalTo(dAtA []byte) (int, error) {
|
func (m *LockResponse) MarshalTo(dAtA []byte) (int, error) {
|
||||||
var i int
|
size := m.Size()
|
||||||
|
return m.MarshalToSizedBuffer(dAtA[:size])
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *LockResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
||||||
|
i := len(dAtA)
|
||||||
_ = i
|
_ = i
|
||||||
var l int
|
var l int
|
||||||
_ = l
|
_ = l
|
||||||
if m.Header != nil {
|
if m.XXX_unrecognized != nil {
|
||||||
dAtA[i] = 0xa
|
i -= len(m.XXX_unrecognized)
|
||||||
i++
|
copy(dAtA[i:], m.XXX_unrecognized)
|
||||||
i = encodeVarintV3Lock(dAtA, i, uint64(m.Header.Size()))
|
|
||||||
n1, err := m.Header.MarshalTo(dAtA[i:])
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
i += n1
|
|
||||||
}
|
}
|
||||||
if len(m.Key) > 0 {
|
if len(m.Key) > 0 {
|
||||||
dAtA[i] = 0x12
|
i -= len(m.Key)
|
||||||
i++
|
copy(dAtA[i:], m.Key)
|
||||||
i = encodeVarintV3Lock(dAtA, i, uint64(len(m.Key)))
|
i = encodeVarintV3Lock(dAtA, i, uint64(len(m.Key)))
|
||||||
i += copy(dAtA[i:], m.Key)
|
i--
|
||||||
|
dAtA[i] = 0x12
|
||||||
}
|
}
|
||||||
return i, nil
|
if m.Header != nil {
|
||||||
|
{
|
||||||
|
size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
i -= size
|
||||||
|
i = encodeVarintV3Lock(dAtA, i, uint64(size))
|
||||||
|
}
|
||||||
|
i--
|
||||||
|
dAtA[i] = 0xa
|
||||||
|
}
|
||||||
|
return len(dAtA) - i, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *UnlockRequest) Marshal() (dAtA []byte, err error) {
|
func (m *UnlockRequest) Marshal() (dAtA []byte, err error) {
|
||||||
size := m.Size()
|
size := m.Size()
|
||||||
dAtA = make([]byte, size)
|
dAtA = make([]byte, size)
|
||||||
n, err := m.MarshalTo(dAtA)
|
n, err := m.MarshalToSizedBuffer(dAtA[:size])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -338,23 +508,33 @@ func (m *UnlockRequest) Marshal() (dAtA []byte, err error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (m *UnlockRequest) MarshalTo(dAtA []byte) (int, error) {
|
func (m *UnlockRequest) MarshalTo(dAtA []byte) (int, error) {
|
||||||
var i int
|
size := m.Size()
|
||||||
|
return m.MarshalToSizedBuffer(dAtA[:size])
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *UnlockRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
||||||
|
i := len(dAtA)
|
||||||
_ = i
|
_ = i
|
||||||
var l int
|
var l int
|
||||||
_ = l
|
_ = l
|
||||||
if len(m.Key) > 0 {
|
if m.XXX_unrecognized != nil {
|
||||||
dAtA[i] = 0xa
|
i -= len(m.XXX_unrecognized)
|
||||||
i++
|
copy(dAtA[i:], m.XXX_unrecognized)
|
||||||
i = encodeVarintV3Lock(dAtA, i, uint64(len(m.Key)))
|
|
||||||
i += copy(dAtA[i:], m.Key)
|
|
||||||
}
|
}
|
||||||
return i, nil
|
if len(m.Key) > 0 {
|
||||||
|
i -= len(m.Key)
|
||||||
|
copy(dAtA[i:], m.Key)
|
||||||
|
i = encodeVarintV3Lock(dAtA, i, uint64(len(m.Key)))
|
||||||
|
i--
|
||||||
|
dAtA[i] = 0xa
|
||||||
|
}
|
||||||
|
return len(dAtA) - i, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *UnlockResponse) Marshal() (dAtA []byte, err error) {
|
func (m *UnlockResponse) Marshal() (dAtA []byte, err error) {
|
||||||
size := m.Size()
|
size := m.Size()
|
||||||
dAtA = make([]byte, size)
|
dAtA = make([]byte, size)
|
||||||
n, err := m.MarshalTo(dAtA)
|
n, err := m.MarshalToSizedBuffer(dAtA[:size])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -362,33 +542,49 @@ func (m *UnlockResponse) Marshal() (dAtA []byte, err error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (m *UnlockResponse) MarshalTo(dAtA []byte) (int, error) {
|
func (m *UnlockResponse) MarshalTo(dAtA []byte) (int, error) {
|
||||||
var i int
|
size := m.Size()
|
||||||
|
return m.MarshalToSizedBuffer(dAtA[:size])
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *UnlockResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
||||||
|
i := len(dAtA)
|
||||||
_ = i
|
_ = i
|
||||||
var l int
|
var l int
|
||||||
_ = l
|
_ = l
|
||||||
if m.Header != nil {
|
if m.XXX_unrecognized != nil {
|
||||||
dAtA[i] = 0xa
|
i -= len(m.XXX_unrecognized)
|
||||||
i++
|
copy(dAtA[i:], m.XXX_unrecognized)
|
||||||
i = encodeVarintV3Lock(dAtA, i, uint64(m.Header.Size()))
|
|
||||||
n2, err := m.Header.MarshalTo(dAtA[i:])
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
i += n2
|
|
||||||
}
|
}
|
||||||
return i, nil
|
if m.Header != nil {
|
||||||
|
{
|
||||||
|
size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
i -= size
|
||||||
|
i = encodeVarintV3Lock(dAtA, i, uint64(size))
|
||||||
|
}
|
||||||
|
i--
|
||||||
|
dAtA[i] = 0xa
|
||||||
|
}
|
||||||
|
return len(dAtA) - i, nil
|
||||||
}
|
}
|
||||||
 
 func encodeVarintV3Lock(dAtA []byte, offset int, v uint64) int {
+	offset -= sovV3Lock(v)
+	base := offset
 	for v >= 1<<7 {
 		dAtA[offset] = uint8(v&0x7f | 0x80)
 		v >>= 7
 		offset++
 	}
 	dAtA[offset] = uint8(v)
-	return offset + 1
+	return base
 }
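Seen in isolation, the new encodeVarintV3Lock is what makes the backward fill work: the caller passes the index just past where the varint must end, the helper backs up by the exact encoded width (sovV3Lock), writes the bytes forward from that point, and hands back the new, smaller index. A hedged restatement with local names:

package main

import (
    "fmt"
    "math/bits"
)

// sov mirrors sovV3Lock: the varint byte count of x.
func sov(x uint64) int { return (bits.Len64(x|1) + 6) / 7 }

// encodeVarint mirrors the generated helper: offset is one past the varint's
// last byte on entry; the return value is the index of its first byte.
func encodeVarint(dAtA []byte, offset int, v uint64) int {
    offset -= sov(v)
    base := offset
    for v >= 1<<7 {
        dAtA[offset] = uint8(v&0x7f | 0x80)
        v >>= 7
        offset++
    }
    dAtA[offset] = uint8(v)
    return base
}

func main() {
    buf := make([]byte, 8)
    i := encodeVarint(buf, len(buf), 300) // 300 encodes as 0xac 0x02
    fmt.Printf("%x\n", buf[i:])
}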
func (m *LockRequest) Size() (n int) {
|
func (m *LockRequest) Size() (n int) {
|
||||||
|
if m == nil {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
var l int
|
var l int
|
||||||
_ = l
|
_ = l
|
||||||
l = len(m.Name)
|
l = len(m.Name)
|
||||||
@@ -398,10 +594,16 @@ func (m *LockRequest) Size() (n int) {
|
|||||||
if m.Lease != 0 {
|
if m.Lease != 0 {
|
||||||
n += 1 + sovV3Lock(uint64(m.Lease))
|
n += 1 + sovV3Lock(uint64(m.Lease))
|
||||||
}
|
}
|
||||||
|
if m.XXX_unrecognized != nil {
|
||||||
|
n += len(m.XXX_unrecognized)
|
||||||
|
}
|
||||||
return n
|
return n
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *LockResponse) Size() (n int) {
|
func (m *LockResponse) Size() (n int) {
|
||||||
|
if m == nil {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
var l int
|
var l int
|
||||||
_ = l
|
_ = l
|
||||||
if m.Header != nil {
|
if m.Header != nil {
|
||||||
@@ -412,38 +614,46 @@ func (m *LockResponse) Size() (n int) {
|
|||||||
if l > 0 {
|
if l > 0 {
|
||||||
n += 1 + l + sovV3Lock(uint64(l))
|
n += 1 + l + sovV3Lock(uint64(l))
|
||||||
}
|
}
|
||||||
|
if m.XXX_unrecognized != nil {
|
||||||
|
n += len(m.XXX_unrecognized)
|
||||||
|
}
|
||||||
return n
|
return n
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *UnlockRequest) Size() (n int) {
|
func (m *UnlockRequest) Size() (n int) {
|
||||||
|
if m == nil {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
var l int
|
var l int
|
||||||
_ = l
|
_ = l
|
||||||
l = len(m.Key)
|
l = len(m.Key)
|
||||||
if l > 0 {
|
if l > 0 {
|
||||||
n += 1 + l + sovV3Lock(uint64(l))
|
n += 1 + l + sovV3Lock(uint64(l))
|
||||||
}
|
}
|
||||||
|
if m.XXX_unrecognized != nil {
|
||||||
|
n += len(m.XXX_unrecognized)
|
||||||
|
}
|
||||||
return n
|
return n
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *UnlockResponse) Size() (n int) {
|
func (m *UnlockResponse) Size() (n int) {
|
||||||
|
if m == nil {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
var l int
|
var l int
|
||||||
_ = l
|
_ = l
|
||||||
if m.Header != nil {
|
if m.Header != nil {
|
||||||
l = m.Header.Size()
|
l = m.Header.Size()
|
||||||
n += 1 + l + sovV3Lock(uint64(l))
|
n += 1 + l + sovV3Lock(uint64(l))
|
||||||
}
|
}
|
||||||
|
if m.XXX_unrecognized != nil {
|
||||||
|
n += len(m.XXX_unrecognized)
|
||||||
|
}
|
||||||
return n
|
return n
|
||||||
}
|
}
|
||||||
 
 func sovV3Lock(x uint64) (n int) {
-	for {
-		n++
-		x >>= 7
-		if x == 0 {
-			break
-		}
-	}
-	return n
+	return (math_bits.Len64(x|1) + 6) / 7
 }
 func sozV3Lock(x uint64) (n int) {
 	return sovV3Lock(uint64((x << 1) ^ uint64((int64(x) >> 63))))
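The loop-based byte count is replaced by a closed form: a value needs ceil(bitlen/7) varint bytes, and or-ing with 1 makes zero take one byte. A quick cross-check of the two formulations:

package main

import (
    "fmt"
    "math/bits"
)

// closed-form size, as in the new generated code
func sovNew(x uint64) int { return (bits.Len64(x|1) + 6) / 7 }

// old loop-based size, kept only for comparison
func sovOld(x uint64) (n int) {
    for {
        n++
        x >>= 7
        if x == 0 {
            return n
        }
    }
}

func main() {
    for _, x := range []uint64{0, 1, 127, 128, 16383, 16384, 1<<63 - 1} {
        fmt.Println(x, sovNew(x), sovOld(x)) // the two columns always agree
    }
}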
@@ -463,7 +673,7 @@ func (m *LockRequest) Unmarshal(dAtA []byte) error {
|
|||||||
}
|
}
|
||||||
b := dAtA[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
wire |= (uint64(b) & 0x7F) << shift
|
wire |= uint64(b&0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
@@ -491,7 +701,7 @@ func (m *LockRequest) Unmarshal(dAtA []byte) error {
|
|||||||
}
|
}
|
||||||
b := dAtA[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
byteLen |= (int(b) & 0x7F) << shift
|
byteLen |= int(b&0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
@@ -500,6 +710,9 @@ func (m *LockRequest) Unmarshal(dAtA []byte) error {
|
|||||||
return ErrInvalidLengthV3Lock
|
return ErrInvalidLengthV3Lock
|
||||||
}
|
}
|
||||||
postIndex := iNdEx + byteLen
|
postIndex := iNdEx + byteLen
|
||||||
|
if postIndex < 0 {
|
||||||
|
return ErrInvalidLengthV3Lock
|
||||||
|
}
|
||||||
if postIndex > l {
|
if postIndex > l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
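The added "if postIndex < 0" checks in the Unmarshal hunks guard the index arithmetic itself: with a hostile length prefix, iNdEx + byteLen can wrap to a negative int even though both operands looked plausible, so the sum has to be validated before it is compared against the buffer length. A small hedged helper showing the same ordering of checks (local names, not the generated ones):

package main

import (
    "errors"
    "fmt"
    "io"
)

var errInvalidLength = errors.New("proto: negative length found during unmarshaling")

// checkBounds validates a length-delimited field that claims byteLen bytes
// starting at iNdEx inside a buffer of l bytes.
func checkBounds(iNdEx, byteLen, l int) (int, error) {
    if byteLen < 0 {
        return 0, errInvalidLength
    }
    postIndex := iNdEx + byteLen
    if postIndex < 0 { // the addition above overflowed
        return 0, errInvalidLength
    }
    if postIndex > l {
        return 0, io.ErrUnexpectedEOF
    }
    return postIndex, nil
}

func main() {
    _, err := checkBounds(10, int(^uint(0)>>1), 64) // byteLen == MaxInt
    fmt.Println(err)
}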
@@ -522,7 +735,7 @@ func (m *LockRequest) Unmarshal(dAtA []byte) error {
|
|||||||
}
|
}
|
||||||
b := dAtA[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
m.Lease |= (int64(b) & 0x7F) << shift
|
m.Lease |= int64(b&0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
@@ -536,9 +749,13 @@ func (m *LockRequest) Unmarshal(dAtA []byte) error {
|
|||||||
if skippy < 0 {
|
if skippy < 0 {
|
||||||
return ErrInvalidLengthV3Lock
|
return ErrInvalidLengthV3Lock
|
||||||
}
|
}
|
||||||
|
if (iNdEx + skippy) < 0 {
|
||||||
|
return ErrInvalidLengthV3Lock
|
||||||
|
}
|
||||||
if (iNdEx + skippy) > l {
|
if (iNdEx + skippy) > l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
|
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
|
||||||
iNdEx += skippy
|
iNdEx += skippy
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -563,7 +780,7 @@ func (m *LockResponse) Unmarshal(dAtA []byte) error {
|
|||||||
}
|
}
|
||||||
b := dAtA[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
wire |= (uint64(b) & 0x7F) << shift
|
wire |= uint64(b&0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
@@ -591,7 +808,7 @@ func (m *LockResponse) Unmarshal(dAtA []byte) error {
|
|||||||
}
|
}
|
||||||
b := dAtA[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
msglen |= (int(b) & 0x7F) << shift
|
msglen |= int(b&0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
@@ -600,6 +817,9 @@ func (m *LockResponse) Unmarshal(dAtA []byte) error {
|
|||||||
return ErrInvalidLengthV3Lock
|
return ErrInvalidLengthV3Lock
|
||||||
}
|
}
|
||||||
postIndex := iNdEx + msglen
|
postIndex := iNdEx + msglen
|
||||||
|
if postIndex < 0 {
|
||||||
|
return ErrInvalidLengthV3Lock
|
||||||
|
}
|
||||||
if postIndex > l {
|
if postIndex > l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
@@ -624,7 +844,7 @@ func (m *LockResponse) Unmarshal(dAtA []byte) error {
|
|||||||
}
|
}
|
||||||
b := dAtA[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
byteLen |= (int(b) & 0x7F) << shift
|
byteLen |= int(b&0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
@@ -633,6 +853,9 @@ func (m *LockResponse) Unmarshal(dAtA []byte) error {
|
|||||||
return ErrInvalidLengthV3Lock
|
return ErrInvalidLengthV3Lock
|
||||||
}
|
}
|
||||||
postIndex := iNdEx + byteLen
|
postIndex := iNdEx + byteLen
|
||||||
|
if postIndex < 0 {
|
||||||
|
return ErrInvalidLengthV3Lock
|
||||||
|
}
|
||||||
if postIndex > l {
|
if postIndex > l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
@@ -650,9 +873,13 @@ func (m *LockResponse) Unmarshal(dAtA []byte) error {
|
|||||||
if skippy < 0 {
|
if skippy < 0 {
|
||||||
return ErrInvalidLengthV3Lock
|
return ErrInvalidLengthV3Lock
|
||||||
}
|
}
|
||||||
|
if (iNdEx + skippy) < 0 {
|
||||||
|
return ErrInvalidLengthV3Lock
|
||||||
|
}
|
||||||
if (iNdEx + skippy) > l {
|
if (iNdEx + skippy) > l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
|
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
|
||||||
iNdEx += skippy
|
iNdEx += skippy
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -677,7 +904,7 @@ func (m *UnlockRequest) Unmarshal(dAtA []byte) error {
|
|||||||
}
|
}
|
||||||
b := dAtA[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
wire |= (uint64(b) & 0x7F) << shift
|
wire |= uint64(b&0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
@@ -705,7 +932,7 @@ func (m *UnlockRequest) Unmarshal(dAtA []byte) error {
|
|||||||
}
|
}
|
||||||
b := dAtA[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
byteLen |= (int(b) & 0x7F) << shift
|
byteLen |= int(b&0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
@@ -714,6 +941,9 @@ func (m *UnlockRequest) Unmarshal(dAtA []byte) error {
|
|||||||
return ErrInvalidLengthV3Lock
|
return ErrInvalidLengthV3Lock
|
||||||
}
|
}
|
||||||
postIndex := iNdEx + byteLen
|
postIndex := iNdEx + byteLen
|
||||||
|
if postIndex < 0 {
|
||||||
|
return ErrInvalidLengthV3Lock
|
||||||
|
}
|
||||||
if postIndex > l {
|
if postIndex > l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
@@ -731,9 +961,13 @@ func (m *UnlockRequest) Unmarshal(dAtA []byte) error {
|
|||||||
if skippy < 0 {
|
if skippy < 0 {
|
||||||
return ErrInvalidLengthV3Lock
|
return ErrInvalidLengthV3Lock
|
||||||
}
|
}
|
||||||
|
if (iNdEx + skippy) < 0 {
|
||||||
|
return ErrInvalidLengthV3Lock
|
||||||
|
}
|
||||||
if (iNdEx + skippy) > l {
|
if (iNdEx + skippy) > l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
|
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
|
||||||
iNdEx += skippy
|
iNdEx += skippy
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -758,7 +992,7 @@ func (m *UnlockResponse) Unmarshal(dAtA []byte) error {
|
|||||||
}
|
}
|
||||||
b := dAtA[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
wire |= (uint64(b) & 0x7F) << shift
|
wire |= uint64(b&0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
@@ -786,7 +1020,7 @@ func (m *UnlockResponse) Unmarshal(dAtA []byte) error {
|
|||||||
}
|
}
|
||||||
b := dAtA[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
msglen |= (int(b) & 0x7F) << shift
|
msglen |= int(b&0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
@@ -795,6 +1029,9 @@ func (m *UnlockResponse) Unmarshal(dAtA []byte) error {
|
|||||||
return ErrInvalidLengthV3Lock
|
return ErrInvalidLengthV3Lock
|
||||||
}
|
}
|
||||||
postIndex := iNdEx + msglen
|
postIndex := iNdEx + msglen
|
||||||
|
if postIndex < 0 {
|
||||||
|
return ErrInvalidLengthV3Lock
|
||||||
|
}
|
||||||
if postIndex > l {
|
if postIndex > l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
@@ -814,9 +1051,13 @@ func (m *UnlockResponse) Unmarshal(dAtA []byte) error {
|
|||||||
if skippy < 0 {
|
if skippy < 0 {
|
||||||
return ErrInvalidLengthV3Lock
|
return ErrInvalidLengthV3Lock
|
||||||
}
|
}
|
||||||
|
if (iNdEx + skippy) < 0 {
|
||||||
|
return ErrInvalidLengthV3Lock
|
||||||
|
}
|
||||||
if (iNdEx + skippy) > l {
|
if (iNdEx + skippy) > l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
|
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
|
||||||
iNdEx += skippy
|
iNdEx += skippy
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -880,10 +1121,13 @@ func skipV3Lock(dAtA []byte) (n int, err error) {
|
|||||||
break
|
break
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
iNdEx += length
|
|
||||||
if length < 0 {
|
if length < 0 {
|
||||||
return 0, ErrInvalidLengthV3Lock
|
return 0, ErrInvalidLengthV3Lock
|
||||||
}
|
}
|
||||||
|
iNdEx += length
|
||||||
|
if iNdEx < 0 {
|
||||||
|
return 0, ErrInvalidLengthV3Lock
|
||||||
|
}
|
||||||
return iNdEx, nil
|
return iNdEx, nil
|
||||||
case 3:
|
case 3:
|
||||||
for {
|
for {
|
||||||
@@ -912,6 +1156,9 @@ func skipV3Lock(dAtA []byte) (n int, err error) {
|
|||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
iNdEx = start + next
|
iNdEx = start + next
|
||||||
|
if iNdEx < 0 {
|
||||||
|
return 0, ErrInvalidLengthV3Lock
|
||||||
|
}
|
||||||
}
|
}
|
||||||
return iNdEx, nil
|
return iNdEx, nil
|
||||||
case 4:
|
case 4:
|
||||||
@@ -930,30 +1177,3 @@ var (
|
|||||||
ErrInvalidLengthV3Lock = fmt.Errorf("proto: negative length found during unmarshaling")
|
ErrInvalidLengthV3Lock = fmt.Errorf("proto: negative length found during unmarshaling")
|
||||||
ErrIntOverflowV3Lock = fmt.Errorf("proto: integer overflow")
|
ErrIntOverflowV3Lock = fmt.Errorf("proto: integer overflow")
|
||||||
)
|
)
|
||||||
|
|
||||||
func init() { proto.RegisterFile("v3lock.proto", fileDescriptorV3Lock) }
|
|
||||||
|
|
||||||
var fileDescriptorV3Lock = []byte{
|
|
||||||
// 335 bytes of a gzipped FileDescriptorProto
|
|
||||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0x33, 0xce, 0xc9,
|
|
||||||
0x4f, 0xce, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x80, 0xf0, 0x0a, 0x92, 0xa4, 0x44,
|
|
||||||
0xd2, 0xf3, 0xd3, 0xf3, 0xc1, 0x82, 0xfa, 0x20, 0x16, 0x44, 0x5e, 0x4a, 0x2d, 0xb5, 0x24, 0x39,
|
|
||||||
0x45, 0x1f, 0x44, 0x14, 0xa7, 0x16, 0x95, 0xa5, 0x16, 0x21, 0x31, 0x0b, 0x92, 0xf4, 0x8b, 0x0a,
|
|
||||||
0x92, 0xa1, 0xea, 0x64, 0xd2, 0xf3, 0xf3, 0xd3, 0x73, 0x52, 0xf5, 0x13, 0x0b, 0x32, 0xf5, 0x13,
|
|
||||||
0xf3, 0xf2, 0xf2, 0x4b, 0x12, 0x4b, 0x32, 0xf3, 0xf3, 0x8a, 0x21, 0xb2, 0x4a, 0xe6, 0x5c, 0xdc,
|
|
||||||
0x3e, 0xf9, 0xc9, 0xd9, 0x41, 0xa9, 0x85, 0xa5, 0xa9, 0xc5, 0x25, 0x42, 0x42, 0x5c, 0x2c, 0x79,
|
|
||||||
0x89, 0xb9, 0xa9, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0x3c, 0x41, 0x60, 0xb6, 0x90, 0x08, 0x17, 0x6b,
|
|
||||||
0x4e, 0x6a, 0x62, 0x71, 0xaa, 0x04, 0x93, 0x02, 0xa3, 0x06, 0x73, 0x10, 0x84, 0xa3, 0x14, 0xc6,
|
|
||||||
0xc5, 0x03, 0xd1, 0x58, 0x5c, 0x90, 0x9f, 0x57, 0x9c, 0x2a, 0x64, 0xc2, 0xc5, 0x96, 0x91, 0x9a,
|
|
||||||
0x98, 0x92, 0x5a, 0x04, 0xd6, 0xcb, 0x6d, 0x24, 0xa3, 0x87, 0xec, 0x1e, 0x3d, 0x98, 0x3a, 0x0f,
|
|
||||||
0xb0, 0x9a, 0x20, 0xa8, 0x5a, 0x21, 0x01, 0x2e, 0xe6, 0xec, 0xd4, 0x4a, 0xb0, 0xc9, 0x3c, 0x41,
|
|
||||||
0x20, 0xa6, 0x92, 0x22, 0x17, 0x6f, 0x68, 0x5e, 0x0e, 0x92, 0x93, 0xa0, 0x4a, 0x18, 0x11, 0x4a,
|
|
||||||
0xdc, 0xb8, 0xf8, 0x60, 0x4a, 0x28, 0xb1, 0xdc, 0x68, 0x07, 0x23, 0x17, 0x0b, 0xc8, 0x0f, 0x42,
|
|
||||||
0xc1, 0x50, 0x5a, 0x54, 0x0f, 0x16, 0xe6, 0x7a, 0x48, 0x81, 0x22, 0x25, 0x86, 0x2e, 0x0c, 0x31,
|
|
||||||
0x4d, 0x49, 0xa6, 0xe9, 0xf2, 0x93, 0xc9, 0x4c, 0x62, 0x4a, 0x82, 0xfa, 0x65, 0xc6, 0x49, 0xa9,
|
|
||||||
0x25, 0x89, 0xfa, 0x20, 0x45, 0x60, 0xc2, 0x8a, 0x51, 0x4b, 0x28, 0x9a, 0x8b, 0x0d, 0xe2, 0x4a,
|
|
||||||
0x21, 0x71, 0x84, 0x7e, 0x14, 0xaf, 0x49, 0x49, 0x60, 0x4a, 0x40, 0x8d, 0x96, 0x03, 0x1b, 0x2d,
|
|
||||||
0xa1, 0x24, 0x8c, 0x62, 0x74, 0x69, 0x1e, 0xd4, 0x70, 0x27, 0x81, 0x13, 0x8f, 0xe4, 0x18, 0x2f,
|
|
||||||
0x3c, 0x92, 0x63, 0x7c, 0xf0, 0x48, 0x8e, 0x71, 0xc6, 0x63, 0x39, 0x86, 0x24, 0x36, 0x70, 0x7c,
|
|
||||||
0x1a, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x10, 0x82, 0x89, 0xf0, 0x45, 0x02, 0x00, 0x00,
|
|
||||||
}
|
|
||||||
2  vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/key.go  generated vendored
@@ -179,7 +179,7 @@ func checkTxnRequest(r *pb.TxnRequest, maxTxnOps int) error {
 // there is an overlap, returns an error. If no overlap, return put and delete
 // sets for recursive evaluation.
 func checkIntervals(reqs []*pb.RequestOp) (map[string]struct{}, adt.IntervalTree, error) {
-	var dels adt.IntervalTree
+	dels := adt.NewIntervalTree()
 
 	// collect deletes from this level; build first to check lower level overlapped puts
 	for _, req := range reqs {
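The one-line change above swaps a zero-value declaration for a constructor call. My reading (an assumption, not stated in the diff) is that adt.IntervalTree is now an interface type in this etcd version, and the zero value of an interface is nil, so the tree has to be built explicitly before use. A generic illustration of that pitfall:

package main

import "fmt"

// Tree stands in for an exported interface type such as adt.IntervalTree.
type Tree interface {
    Insert(key string)
    Len() int
}

type sliceTree struct{ keys []string }

func (t *sliceTree) Insert(key string) { t.keys = append(t.keys, key) }
func (t *sliceTree) Len() int          { return len(t.keys) }

// NewTree plays the role of adt.NewIntervalTree.
func NewTree() Tree { return &sliceTree{} }

func main() {
    var zero Tree // nil: calling zero.Insert here would panic
    fmt.Println(zero == nil)

    t := NewTree() // a constructor is required once the exported type is an interface
    t.Insert("a")
    fmt.Println(t.Len())
}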
1  vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/BUILD  generated vendored
@@ -6,6 +6,7 @@ go_library(
         "doc.go",
         "error.go",
         "md.go",
+        "metadatafields.go",
     ],
     importmap = "k8s.io/kubernetes/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes",
     importpath = "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes",
2  vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/error.go  generated vendored
@@ -61,6 +61,7 @@ var (
 
 	ErrGRPCNoLeader = status.New(codes.Unavailable, "etcdserver: no leader").Err()
 	ErrGRPCNotLeader = status.New(codes.FailedPrecondition, "etcdserver: not leader").Err()
+	ErrGRPCLeaderChanged = status.New(codes.Unavailable, "etcdserver: leader changed").Err()
 	ErrGRPCNotCapable = status.New(codes.Unavailable, "etcdserver: not capable").Err()
 	ErrGRPCStopped = status.New(codes.Unavailable, "etcdserver: server stopped").Err()
 	ErrGRPCTimeout = status.New(codes.Unavailable, "etcdserver: request timed out").Err()
@@ -163,6 +164,7 @@ var (
 
 	ErrNoLeader = Error(ErrGRPCNoLeader)
 	ErrNotLeader = Error(ErrGRPCNotLeader)
+	ErrLeaderChanged = Error(ErrGRPCLeaderChanged)
 	ErrNotCapable = Error(ErrGRPCNotCapable)
 	ErrStopped = Error(ErrGRPCStopped)
 	ErrTimeout = Error(ErrGRPCTimeout)
20  vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/metadatafields.go  generated vendored  Normal file
@@ -0,0 +1,20 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rpctypes
+
+var (
+	TokenFieldNameGRPC = "token"
+	TokenFieldNameSwagger = "authorization"
+)
1  vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/util.go  generated vendored
@@ -44,6 +44,7 @@ var toGRPCErrorMap = map[error]error{
 
 	etcdserver.ErrNoLeader: rpctypes.ErrGRPCNoLeader,
 	etcdserver.ErrNotLeader: rpctypes.ErrGRPCNotLeader,
+	etcdserver.ErrLeaderChanged: rpctypes.ErrGRPCLeaderChanged,
 	etcdserver.ErrStopped: rpctypes.ErrGRPCStopped,
 	etcdserver.ErrTimeout: rpctypes.ErrGRPCTimeout,
 	etcdserver.ErrTimeoutDueToLeaderFail: rpctypes.ErrGRPCTimeoutDueToLeaderFail,
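The new etcdserver.ErrLeaderChanged entry follows the existing pattern: internal errors are translated once, in this map, into gRPC status errors so that clients see a stable code and message. A hedged sketch of that translation step and of how a caller can branch on the resulting code (variable names are local stand-ins):

package main

import (
    "errors"
    "fmt"

    "google.golang.org/grpc/codes"
    "google.golang.org/grpc/status"
)

var (
    errLeaderChanged = errors.New("etcdserver: leader changed")

    toGRPC = map[error]error{
        errLeaderChanged: status.New(codes.Unavailable, "etcdserver: leader changed").Err(),
    }
)

// togRPCError maps a known internal error to its gRPC form, or wraps it.
func togRPCError(err error) error {
    if g, ok := toGRPC[err]; ok {
        return g
    }
    return status.New(codes.Unknown, err.Error()).Err()
}

func main() {
    err := togRPCError(errLeaderChanged)
    if status.Code(err) == codes.Unavailable {
        fmt.Println("retryable:", err) // a client would typically retry against the new leader
    }
}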
99  vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/watch.go  generated vendored
@@ -31,6 +31,9 @@ import (
 type watchServer struct {
 	clusterID int64
 	memberID  int64
+
+	maxRequestBytes int
+
 	raftTimer etcdserver.RaftTimer
 	watchable mvcc.WatchableKV
 
@@ -39,11 +42,12 @@ type watchServer struct {
 
 func NewWatchServer(s *etcdserver.EtcdServer) pb.WatchServer {
 	return &watchServer{
 		clusterID: int64(s.Cluster().ID()),
 		memberID:  int64(s.ID()),
-		raftTimer: s,
-		watchable: s.Watchable(),
-		ag:        s,
+		maxRequestBytes: int(s.Cfg.MaxRequestBytes + grpcOverheadBytes),
+		raftTimer:       s,
+		watchable:       s.Watchable(),
+		ag:              s,
 	}
 }
 
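The new maxRequestBytes field caps how large a single watch response may grow before it is split; per the diff it is derived from the configured request limit plus a fixed allowance for gRPC framing (grpcOverheadBytes, whose value is not shown here). A tiny sketch of that derivation, with assumed numbers:

package main

import "fmt"

// The 512 KiB overhead figure and the 1.5 MiB request limit below are
// assumptions for illustration, not values taken from this diff.
const grpcOverheadBytes = 512 * 1024

func sendLimit(maxRequestBytes uint) int {
    return int(maxRequestBytes) + grpcOverheadBytes
}

func main() {
    fmt.Println(sendLimit(1536 * 1024))
}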
@@ -83,6 +87,9 @@ const (
 type serverWatchStream struct {
 	clusterID int64
 	memberID  int64
+
+	maxRequestBytes int
+
 	raftTimer etcdserver.RaftTimer
 
 	watchable mvcc.WatchableKV
@@ -92,12 +99,14 @@ type serverWatchStream struct {
 	ctrlStream chan *pb.WatchResponse
 
 	// mu protects progress, prevKV
-	mu sync.Mutex
+	mu sync.RWMutex
 	// progress tracks the watchID that stream might need to send
 	// progress to.
 	// TODO: combine progress and prevKV into a single struct?
 	progress map[mvcc.WatchID]bool
 	prevKV map[mvcc.WatchID]bool
+	// records fragmented watch IDs
+	fragment map[mvcc.WatchID]bool
 
 	// closec indicates the stream is closed.
 	closec chan struct{}
@@ -112,6 +121,9 @@ func (ws *watchServer) Watch(stream pb.Watch_WatchServer) (err error) {
 	sws := serverWatchStream{
 		clusterID: ws.clusterID,
 		memberID:  ws.memberID,
+
+		maxRequestBytes: ws.maxRequestBytes,
+
 		raftTimer: ws.raftTimer,
 
 		watchable: ws.watchable,
@@ -122,6 +134,7 @@ func (ws *watchServer) Watch(stream pb.Watch_WatchServer) (err error) {
 		ctrlStream: make(chan *pb.WatchResponse, ctrlStreamBufLen),
 		progress: make(map[mvcc.WatchID]bool),
 		prevKV: make(map[mvcc.WatchID]bool),
+		fragment: make(map[mvcc.WatchID]bool),
 		closec: make(chan struct{}),
 
 		ag: ws.ag,
@@ -238,6 +251,9 @@ func (sws *serverWatchStream) recvLoop() error {
 			if creq.PrevKv {
 				sws.prevKV[id] = true
 			}
+			if creq.Fragment {
+				sws.fragment[id] = true
+			}
 			sws.mu.Unlock()
 		}
 		wr := &pb.WatchResponse{
@@ -264,9 +280,17 @@ func (sws *serverWatchStream) recvLoop() error {
 				sws.mu.Lock()
 				delete(sws.progress, mvcc.WatchID(id))
 				delete(sws.prevKV, mvcc.WatchID(id))
+				delete(sws.fragment, mvcc.WatchID(id))
 				sws.mu.Unlock()
 			}
 		}
+	case *pb.WatchRequest_ProgressRequest:
+		if uv.ProgressRequest != nil {
+			sws.ctrlStream <- &pb.WatchResponse{
+				Header:  sws.newResponseHeader(sws.watchStream.Rev()),
+				WatchId: -1, // response is not associated with any WatchId and will be broadcast to all watch channels
+			}
+		}
 	default:
 		// we probably should not shutdown the entire stream when
 		// receive an valid command.
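The added ProgressRequest case answers with a header-only WatchResponse whose WatchId is -1, meaning it applies to every watcher on the stream. On the consuming side such a response carries no events and only advances the known revision; a hedged sketch using a local mirror of the relevant fields rather than the real etcdserverpb types:

package main

import "fmt"

// watchResponse is an assumed, simplified mirror of the response shape used here.
type watchResponse struct {
    Revision int64
    WatchId  int64
    Events   []string
}

// handle advances lastRev on progress-only notifications and would otherwise
// deliver events to the application.
func handle(resp watchResponse, lastRev *int64) {
    if len(resp.Events) == 0 && resp.WatchId == -1 {
        *lastRev = resp.Revision // progress-only notification
        return
    }
    // deliver resp.Events ...
    *lastRev = resp.Revision
}

func main() {
    var rev int64
    handle(watchResponse{Revision: 42, WatchId: -1}, &rev)
    fmt.Println(rev)
}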
@@ -310,9 +334,9 @@ func (sws *serverWatchStream) sendLoop() {
 			// or define protocol buffer with []mvccpb.Event.
 			evs := wresp.Events
 			events := make([]*mvccpb.Event, len(evs))
-			sws.mu.Lock()
+			sws.mu.RLock()
 			needPrevKV := sws.prevKV[wresp.WatchID]
-			sws.mu.Unlock()
+			sws.mu.RUnlock()
 			for i := range evs {
 				events[i] = &evs[i]
 
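Switching mu to sync.RWMutex lets the send path, which only reads the per-watch flag maps, take a shared lock while recvLoop keeps the exclusive lock for writes. The same idea in miniature:

package main

import (
    "fmt"
    "sync"
)

// flags guards read-mostly per-watch booleans with an RWMutex.
type flags struct {
    mu     sync.RWMutex
    prevKV map[int64]bool
}

// need is the hot read path: many senders can hold the read lock at once.
func (f *flags) need(id int64) bool {
    f.mu.RLock()
    defer f.mu.RUnlock()
    return f.prevKV[id]
}

// set is the write path and still takes the exclusive lock.
func (f *flags) set(id int64, v bool) {
    f.mu.Lock()
    defer f.mu.Unlock()
    f.prevKV[id] = v
}

func main() {
    f := &flags{prevKV: make(map[int64]bool)}
    f.set(1, true)
    fmt.Println(f.need(1))
}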
@@ -342,11 +366,23 @@ func (sws *serverWatchStream) sendLoop() {
 			}
 
 			mvcc.ReportEventReceived(len(evs))
-			if err := sws.gRPCStream.Send(wr); err != nil {
-				if isClientCtxErr(sws.gRPCStream.Context().Err(), err) {
-					plog.Debugf("failed to send watch response to gRPC stream (%q)", err.Error())
+
+			sws.mu.RLock()
+			fragmented, ok := sws.fragment[wresp.WatchID]
+			sws.mu.RUnlock()
+
+			var serr error
+			if !fragmented && !ok {
+				serr = sws.gRPCStream.Send(wr)
+			} else {
+				serr = sendFragments(wr, sws.maxRequestBytes, sws.gRPCStream.Send)
+			}
+
+			if serr != nil {
+				if isClientCtxErr(sws.gRPCStream.Context().Err(), serr) {
+					plog.Debugf("failed to send watch response to gRPC stream (%q)", serr.Error())
 				} else {
-					plog.Warningf("failed to send watch response to gRPC stream (%q)", err.Error())
+					plog.Warningf("failed to send watch response to gRPC stream (%q)", serr.Error())
 				}
 				return
 			}
@@ -409,6 +445,45 @@ func (sws *serverWatchStream) sendLoop() {
 	}
 }
+
+func sendFragments(
+	wr *pb.WatchResponse,
+	maxRequestBytes int,
+	sendFunc func(*pb.WatchResponse) error) error {
+	// no need to fragment if total request size is smaller
+	// than max request limit or response contains only one event
+	if wr.Size() < maxRequestBytes || len(wr.Events) < 2 {
+		return sendFunc(wr)
+	}
+
+	ow := *wr
+	ow.Events = make([]*mvccpb.Event, 0)
+	ow.Fragment = true
+
+	var idx int
+	for {
+		cur := ow
+		for _, ev := range wr.Events[idx:] {
+			cur.Events = append(cur.Events, ev)
+			if len(cur.Events) > 1 && cur.Size() >= maxRequestBytes {
+				cur.Events = cur.Events[:len(cur.Events)-1]
+				break
+			}
+			idx++
+		}
+		if idx == len(wr.Events) {
+			// last response has no more fragment
+			cur.Fragment = false
+		}
+		if err := sendFunc(&cur); err != nil {
+			return err
+		}
+		if !cur.Fragment {
+			break
+		}
+	}
+	return nil
+}
 
 func (sws *serverWatchStream) close() {
 	sws.watchStream.Close()
 	close(sws.closec)
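sendFragments splits an oversized WatchResponse into pieces that each stay under maxRequestBytes, marking every piece except the last with Fragment set. A receiver therefore has to buffer fragments until the terminating one arrives; a hedged sketch of that reassembly loop, with local types standing in for the real protobuf ones:

package main

import "fmt"

// fragmentMsg is an assumed, simplified view of one fragment on the wire.
type fragmentMsg struct {
    Fragment bool
    Events   []string
}

// reassemble pulls fragments from recv until one arrives with Fragment false,
// then returns the merged event list.
func reassemble(recv func() (fragmentMsg, error)) ([]string, error) {
    var all []string
    for {
        f, err := recv()
        if err != nil {
            return nil, err
        }
        all = append(all, f.Events...)
        if !f.Fragment {
            return all, nil
        }
    }
}

func main() {
    msgs := []fragmentMsg{
        {Fragment: true, Events: []string{"a", "b"}},
        {Fragment: false, Events: []string{"c"}},
    }
    i := 0
    recv := func() (fragmentMsg, error) { m := msgs[i]; i++; return m, nil }
    evs, _ := reassemble(recv)
    fmt.Println(evs)
}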
1  vendor/github.com/coreos/etcd/etcdserver/errors.go  generated vendored
@@ -27,6 +27,7 @@ var (
 	ErrTimeoutDueToLeaderFail = errors.New("etcdserver: request timed out, possibly due to previous leader failure")
 	ErrTimeoutDueToConnectionLost = errors.New("etcdserver: request timed out, possibly due to connection lost")
 	ErrTimeoutLeaderTransfer = errors.New("etcdserver: request timed out, leader transfer took too long")
+	ErrLeaderChanged = errors.New("etcdserver: leader changed")
 	ErrNotEnoughStartedMembers = errors.New("etcdserver: re-configuration failed due to not enough started members")
 	ErrNoLeader = errors.New("etcdserver: no leader")
 	ErrNotLeader = errors.New("etcdserver: not leader")
4  vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/BUILD  generated vendored
@@ -16,8 +16,10 @@ go_library(
         "//vendor/github.com/coreos/etcd/mvcc/mvccpb:go_default_library",
         "//vendor/github.com/gogo/protobuf/gogoproto:go_default_library",
         "//vendor/github.com/golang/protobuf/proto:go_default_library",
-        "//vendor/golang.org/x/net/context:go_default_library",
+        "//vendor/google.golang.org/genproto/googleapis/api/annotations:go_default_library",
         "//vendor/google.golang.org/grpc:go_default_library",
+        "//vendor/google.golang.org/grpc/codes:go_default_library",
+        "//vendor/google.golang.org/grpc/status:go_default_library",
     ],
 )
 
613  vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.pb.go  generated vendored
@@ -1,119 +1,16 @@
|
|||||||
// Code generated by protoc-gen-gogo. DO NOT EDIT.
|
// Code generated by protoc-gen-gogo. DO NOT EDIT.
|
||||||
// source: etcdserver.proto
|
// source: etcdserver.proto
|
||||||
|
|
||||||
/*
|
|
||||||
Package etcdserverpb is a generated protocol buffer package.
|
|
||||||
|
|
||||||
It is generated from these files:
|
|
||||||
etcdserver.proto
|
|
||||||
raft_internal.proto
|
|
||||||
rpc.proto
|
|
||||||
|
|
||||||
It has these top-level messages:
|
|
||||||
Request
|
|
||||||
Metadata
|
|
||||||
RequestHeader
|
|
||||||
InternalRaftRequest
|
|
||||||
EmptyResponse
|
|
||||||
InternalAuthenticateRequest
|
|
||||||
ResponseHeader
|
|
||||||
RangeRequest
|
|
||||||
RangeResponse
|
|
||||||
PutRequest
|
|
||||||
PutResponse
|
|
||||||
DeleteRangeRequest
|
|
||||||
DeleteRangeResponse
|
|
||||||
RequestOp
|
|
||||||
ResponseOp
|
|
||||||
Compare
|
|
||||||
TxnRequest
|
|
||||||
TxnResponse
|
|
||||||
CompactionRequest
|
|
||||||
CompactionResponse
|
|
||||||
HashRequest
|
|
||||||
HashKVRequest
|
|
||||||
HashKVResponse
|
|
||||||
HashResponse
|
|
||||||
SnapshotRequest
|
|
||||||
SnapshotResponse
|
|
||||||
WatchRequest
|
|
||||||
WatchCreateRequest
|
|
||||||
WatchCancelRequest
|
|
||||||
WatchResponse
|
|
||||||
LeaseGrantRequest
|
|
||||||
LeaseGrantResponse
|
|
||||||
LeaseRevokeRequest
|
|
||||||
LeaseRevokeResponse
|
|
||||||
LeaseKeepAliveRequest
|
|
||||||
LeaseKeepAliveResponse
|
|
||||||
LeaseTimeToLiveRequest
|
|
||||||
LeaseTimeToLiveResponse
|
|
||||||
LeaseLeasesRequest
|
|
||||||
LeaseStatus
|
|
||||||
LeaseLeasesResponse
|
|
||||||
Member
|
|
||||||
MemberAddRequest
|
|
||||||
MemberAddResponse
|
|
||||||
MemberRemoveRequest
|
|
||||||
MemberRemoveResponse
|
|
||||||
MemberUpdateRequest
|
|
||||||
MemberUpdateResponse
|
|
||||||
MemberListRequest
|
|
||||||
MemberListResponse
|
|
||||||
DefragmentRequest
|
|
||||||
DefragmentResponse
|
|
||||||
MoveLeaderRequest
|
|
||||||
MoveLeaderResponse
|
|
||||||
AlarmRequest
|
|
||||||
AlarmMember
|
|
||||||
AlarmResponse
|
|
||||||
StatusRequest
|
|
||||||
StatusResponse
|
|
||||||
AuthEnableRequest
|
|
||||||
AuthDisableRequest
|
|
||||||
AuthenticateRequest
|
|
||||||
AuthUserAddRequest
|
|
||||||
AuthUserGetRequest
|
|
||||||
AuthUserDeleteRequest
|
|
||||||
AuthUserChangePasswordRequest
|
|
||||||
AuthUserGrantRoleRequest
|
|
||||||
AuthUserRevokeRoleRequest
|
|
||||||
AuthRoleAddRequest
|
|
||||||
AuthRoleGetRequest
|
|
||||||
AuthUserListRequest
|
|
||||||
AuthRoleListRequest
|
|
||||||
AuthRoleDeleteRequest
|
|
||||||
AuthRoleGrantPermissionRequest
|
|
||||||
AuthRoleRevokePermissionRequest
|
|
||||||
AuthEnableResponse
|
|
||||||
AuthDisableResponse
|
|
||||||
AuthenticateResponse
|
|
||||||
AuthUserAddResponse
|
|
||||||
AuthUserGetResponse
|
|
||||||
AuthUserDeleteResponse
|
|
||||||
AuthUserChangePasswordResponse
|
|
||||||
AuthUserGrantRoleResponse
|
|
||||||
AuthUserRevokeRoleResponse
|
|
||||||
AuthRoleAddResponse
|
|
||||||
AuthRoleGetResponse
|
|
||||||
AuthRoleListResponse
|
|
||||||
AuthUserListResponse
|
|
||||||
AuthRoleDeleteResponse
|
|
||||||
AuthRoleGrantPermissionResponse
|
|
||||||
AuthRoleRevokePermissionResponse
|
|
||||||
*/
|
|
||||||
package etcdserverpb
|
package etcdserverpb
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
fmt "fmt"
|
||||||
|
io "io"
|
||||||
proto "github.com/golang/protobuf/proto"
|
|
||||||
|
|
||||||
math "math"
|
math "math"
|
||||||
|
math_bits "math/bits"
|
||||||
|
|
||||||
_ "github.com/gogo/protobuf/gogoproto"
|
_ "github.com/gogo/protobuf/gogoproto"
|
||||||
|
proto "github.com/golang/protobuf/proto"
|
||||||
io "io"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// Reference imports to suppress errors if they are not otherwise used.
|
// Reference imports to suppress errors if they are not otherwise used.
|
||||||
@@ -128,50 +25,141 @@ var _ = math.Inf
|
|||||||
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
|
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
|
||||||
|
|
||||||
type Request struct {
|
type Request struct {
|
||||||
ID uint64 `protobuf:"varint,1,opt,name=ID" json:"ID"`
|
ID uint64 `protobuf:"varint,1,opt,name=ID" json:"ID"`
|
||||||
Method string `protobuf:"bytes,2,opt,name=Method" json:"Method"`
|
Method string `protobuf:"bytes,2,opt,name=Method" json:"Method"`
|
||||||
Path string `protobuf:"bytes,3,opt,name=Path" json:"Path"`
|
Path string `protobuf:"bytes,3,opt,name=Path" json:"Path"`
|
||||||
Val string `protobuf:"bytes,4,opt,name=Val" json:"Val"`
|
Val string `protobuf:"bytes,4,opt,name=Val" json:"Val"`
|
||||||
Dir bool `protobuf:"varint,5,opt,name=Dir" json:"Dir"`
|
Dir bool `protobuf:"varint,5,opt,name=Dir" json:"Dir"`
|
||||||
PrevValue string `protobuf:"bytes,6,opt,name=PrevValue" json:"PrevValue"`
|
PrevValue string `protobuf:"bytes,6,opt,name=PrevValue" json:"PrevValue"`
|
||||||
PrevIndex uint64 `protobuf:"varint,7,opt,name=PrevIndex" json:"PrevIndex"`
|
PrevIndex uint64 `protobuf:"varint,7,opt,name=PrevIndex" json:"PrevIndex"`
|
||||||
PrevExist *bool `protobuf:"varint,8,opt,name=PrevExist" json:"PrevExist,omitempty"`
|
PrevExist *bool `protobuf:"varint,8,opt,name=PrevExist" json:"PrevExist,omitempty"`
|
||||||
Expiration int64 `protobuf:"varint,9,opt,name=Expiration" json:"Expiration"`
|
Expiration int64 `protobuf:"varint,9,opt,name=Expiration" json:"Expiration"`
|
||||||
Wait bool `protobuf:"varint,10,opt,name=Wait" json:"Wait"`
|
Wait bool `protobuf:"varint,10,opt,name=Wait" json:"Wait"`
|
||||||
Since uint64 `protobuf:"varint,11,opt,name=Since" json:"Since"`
|
Since uint64 `protobuf:"varint,11,opt,name=Since" json:"Since"`
|
||||||
Recursive bool `protobuf:"varint,12,opt,name=Recursive" json:"Recursive"`
|
Recursive bool `protobuf:"varint,12,opt,name=Recursive" json:"Recursive"`
|
||||||
Sorted bool `protobuf:"varint,13,opt,name=Sorted" json:"Sorted"`
|
Sorted bool `protobuf:"varint,13,opt,name=Sorted" json:"Sorted"`
|
||||||
Quorum bool `protobuf:"varint,14,opt,name=Quorum" json:"Quorum"`
|
Quorum bool `protobuf:"varint,14,opt,name=Quorum" json:"Quorum"`
|
||||||
Time int64 `protobuf:"varint,15,opt,name=Time" json:"Time"`
|
Time int64 `protobuf:"varint,15,opt,name=Time" json:"Time"`
|
||||||
Stream bool `protobuf:"varint,16,opt,name=Stream" json:"Stream"`
|
Stream bool `protobuf:"varint,16,opt,name=Stream" json:"Stream"`
|
||||||
Refresh *bool `protobuf:"varint,17,opt,name=Refresh" json:"Refresh,omitempty"`
|
Refresh *bool `protobuf:"varint,17,opt,name=Refresh" json:"Refresh,omitempty"`
|
||||||
XXX_unrecognized []byte `json:"-"`
|
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||||
|
XXX_unrecognized []byte `json:"-"`
|
||||||
|
XXX_sizecache int32 `json:"-"`
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *Request) Reset() { *m = Request{} }
|
func (m *Request) Reset() { *m = Request{} }
|
||||||
func (m *Request) String() string { return proto.CompactTextString(m) }
|
func (m *Request) String() string { return proto.CompactTextString(m) }
|
||||||
func (*Request) ProtoMessage() {}
|
func (*Request) ProtoMessage() {}
|
||||||
func (*Request) Descriptor() ([]byte, []int) { return fileDescriptorEtcdserver, []int{0} }
|
func (*Request) Descriptor() ([]byte, []int) {
|
||||||
|
return fileDescriptor_09ffbeb3bebbce7e, []int{0}
|
||||||
|
}
|
||||||
|
func (m *Request) XXX_Unmarshal(b []byte) error {
|
||||||
|
return m.Unmarshal(b)
|
||||||
|
}
|
||||||
|
func (m *Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||||
|
if deterministic {
|
||||||
|
return xxx_messageInfo_Request.Marshal(b, m, deterministic)
|
||||||
|
} else {
|
||||||
|
b = b[:cap(b)]
|
||||||
|
n, err := m.MarshalToSizedBuffer(b)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return b[:n], nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
func (m *Request) XXX_Merge(src proto.Message) {
|
||||||
|
xxx_messageInfo_Request.Merge(m, src)
|
||||||
|
}
|
||||||
|
func (m *Request) XXX_Size() int {
|
||||||
|
return m.Size()
|
||||||
|
}
|
||||||
|
func (m *Request) XXX_DiscardUnknown() {
|
||||||
|
xxx_messageInfo_Request.DiscardUnknown(m)
|
||||||
|
}
|
||||||
|
|
||||||
|
var xxx_messageInfo_Request proto.InternalMessageInfo
|
||||||
|
|
||||||
type Metadata struct {
|
type Metadata struct {
|
||||||
NodeID uint64 `protobuf:"varint,1,opt,name=NodeID" json:"NodeID"`
|
NodeID uint64 `protobuf:"varint,1,opt,name=NodeID" json:"NodeID"`
|
||||||
ClusterID uint64 `protobuf:"varint,2,opt,name=ClusterID" json:"ClusterID"`
|
ClusterID uint64 `protobuf:"varint,2,opt,name=ClusterID" json:"ClusterID"`
|
||||||
XXX_unrecognized []byte `json:"-"`
|
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||||
|
XXX_unrecognized []byte `json:"-"`
|
||||||
|
XXX_sizecache int32 `json:"-"`
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *Metadata) Reset() { *m = Metadata{} }
|
func (m *Metadata) Reset() { *m = Metadata{} }
|
||||||
func (m *Metadata) String() string { return proto.CompactTextString(m) }
|
func (m *Metadata) String() string { return proto.CompactTextString(m) }
|
||||||
func (*Metadata) ProtoMessage() {}
|
func (*Metadata) ProtoMessage() {}
|
||||||
func (*Metadata) Descriptor() ([]byte, []int) { return fileDescriptorEtcdserver, []int{1} }
|
func (*Metadata) Descriptor() ([]byte, []int) {
|
||||||
|
return fileDescriptor_09ffbeb3bebbce7e, []int{1}
|
||||||
|
}
|
||||||
|
func (m *Metadata) XXX_Unmarshal(b []byte) error {
|
||||||
|
return m.Unmarshal(b)
|
||||||
|
}
|
||||||
|
func (m *Metadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||||
|
if deterministic {
|
||||||
|
return xxx_messageInfo_Metadata.Marshal(b, m, deterministic)
|
||||||
|
} else {
|
||||||
|
b = b[:cap(b)]
|
||||||
|
n, err := m.MarshalToSizedBuffer(b)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return b[:n], nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
func (m *Metadata) XXX_Merge(src proto.Message) {
|
||||||
|
xxx_messageInfo_Metadata.Merge(m, src)
|
||||||
|
}
|
||||||
|
func (m *Metadata) XXX_Size() int {
|
||||||
|
return m.Size()
|
||||||
|
}
|
||||||
|
func (m *Metadata) XXX_DiscardUnknown() {
|
||||||
|
xxx_messageInfo_Metadata.DiscardUnknown(m)
|
||||||
|
}
|
||||||
|
|
||||||
|
var xxx_messageInfo_Metadata proto.InternalMessageInfo
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
proto.RegisterType((*Request)(nil), "etcdserverpb.Request")
|
proto.RegisterType((*Request)(nil), "etcdserverpb.Request")
|
||||||
proto.RegisterType((*Metadata)(nil), "etcdserverpb.Metadata")
|
proto.RegisterType((*Metadata)(nil), "etcdserverpb.Metadata")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func init() { proto.RegisterFile("etcdserver.proto", fileDescriptor_09ffbeb3bebbce7e) }
|
||||||
|
|
||||||
|
var fileDescriptor_09ffbeb3bebbce7e = []byte{
|
||||||
|
// 380 bytes of a gzipped FileDescriptorProto
|
||||||
|
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x5c, 0xd2, 0xdd, 0x6e, 0xda, 0x30,
|
||||||
|
0x14, 0x07, 0x70, 0x0c, 0xe1, 0xcb, 0x63, 0x1b, 0xb3, 0xd0, 0x74, 0x84, 0xa6, 0x2c, 0x42, 0xbb,
|
||||||
|
0xc8, 0xd5, 0xf6, 0x0e, 0x2c, 0x5c, 0x44, 0x2a, 0x15, 0x0d, 0x15, 0xbd, 0x76, 0xc9, 0x29, 0x58,
|
||||||
|
0x02, 0x4c, 0x1d, 0x07, 0xf1, 0x06, 0x7d, 0x85, 0x3e, 0x12, 0x97, 0x7d, 0x82, 0xaa, 0xa5, 0x2f,
|
||||||
|
0x52, 0x39, 0x24, 0xc4, 0xed, 0x5d, 0xf4, 0xfb, 0x9f, 0x1c, 0x1f, 0x7f, 0xd0, 0x2e, 0xea, 0x79,
|
||||||
|
0x9c, 0xa0, 0xda, 0xa1, 0xfa, 0xbb, 0x55, 0x52, 0x4b, 0xd6, 0x29, 0x65, 0x7b, 0xdb, 0xef, 0x2d,
|
||||||
|
0xe4, 0x42, 0x66, 0xc1, 0x3f, 0xf3, 0x75, 0xaa, 0x19, 0x3c, 0x38, 0xb4, 0x19, 0xe1, 0x7d, 0x8a,
|
||||||
|
0x89, 0x66, 0x3d, 0x5a, 0x0d, 0x03, 0x20, 0x1e, 0xf1, 0x9d, 0xa1, 0x73, 0x78, 0xfe, 0x5d, 0x89,
|
||||||
|
0xaa, 0x61, 0xc0, 0x7e, 0xd1, 0xc6, 0x18, 0xf5, 0x52, 0xc6, 0x50, 0xf5, 0x88, 0xdf, 0xce, 0x93,
|
||||||
|
0xdc, 0x18, 0x50, 0x67, 0xc2, 0xf5, 0x12, 0x6a, 0x56, 0x96, 0x09, 0xfb, 0x49, 0x6b, 0x33, 0xbe,
|
||||||
|
0x02, 0xc7, 0x0a, 0x0c, 0x18, 0x0f, 0x84, 0x82, 0xba, 0x47, 0xfc, 0x56, 0xe1, 0x81, 0x50, 0x6c,
|
||||||
|
0x40, 0xdb, 0x13, 0x85, 0xbb, 0x19, 0x5f, 0xa5, 0x08, 0x0d, 0xeb, 0xaf, 0x92, 0x8b, 0x9a, 0x70,
|
||||||
|
0x13, 0xe3, 0x1e, 0x9a, 0xd6, 0xa0, 0x25, 0x17, 0x35, 0xa3, 0xbd, 0x48, 0x34, 0xb4, 0xce, 0xab,
|
||||||
|
0x90, 0xa8, 0x64, 0xf6, 0x87, 0xd2, 0xd1, 0x7e, 0x2b, 0x14, 0xd7, 0x42, 0x6e, 0xa0, 0xed, 0x11,
|
||||||
|
0xbf, 0x96, 0x37, 0xb2, 0xdc, 0xec, 0xed, 0x86, 0x0b, 0x0d, 0xd4, 0x1a, 0x35, 0x13, 0xd6, 0xa7,
|
||||||
|
0xf5, 0xa9, 0xd8, 0xcc, 0x11, 0xbe, 0x58, 0x33, 0x9c, 0xc8, 0xac, 0x1f, 0xe1, 0x3c, 0x55, 0x89,
|
||||||
|
0xd8, 0x21, 0x74, 0xac, 0x5f, 0x4b, 0x36, 0x67, 0x3a, 0x95, 0x4a, 0x63, 0x0c, 0x5f, 0xad, 0x82,
|
||||||
|
0xdc, 0x4c, 0x7a, 0x95, 0x4a, 0x95, 0xae, 0xe1, 0x9b, 0x9d, 0x9e, 0xcc, 0x4c, 0x75, 0x2d, 0xd6,
|
||||||
|
0x08, 0xdf, 0xad, 0xa9, 0x33, 0xc9, 0xba, 0x6a, 0x85, 0x7c, 0x0d, 0xdd, 0x0f, 0x5d, 0x33, 0x63,
|
||||||
|
0xae, 0xb9, 0xe8, 0x3b, 0x85, 0xc9, 0x12, 0x7e, 0x58, 0xa7, 0x52, 0xe0, 0xe0, 0x82, 0xb6, 0xc6,
|
||||||
|
0xa8, 0x79, 0xcc, 0x35, 0x37, 0x9d, 0x2e, 0x65, 0x8c, 0x9f, 0x5e, 0x43, 0x6e, 0x66, 0x87, 0xff,
|
||||||
|
0x57, 0x69, 0xa2, 0x51, 0x85, 0x41, 0xf6, 0x28, 0xce, 0xb7, 0x70, 0xe6, 0x61, 0xef, 0xf0, 0xea,
|
||||||
|
0x56, 0x0e, 0x47, 0x97, 0x3c, 0x1d, 0x5d, 0xf2, 0x72, 0x74, 0xc9, 0xe3, 0x9b, 0x5b, 0x79, 0x0f,
|
||||||
|
0x00, 0x00, 0xff, 0xff, 0xee, 0x40, 0xba, 0xd6, 0xa4, 0x02, 0x00, 0x00,
|
||||||
|
}
|
||||||
|
|
||||||
func (m *Request) Marshal() (dAtA []byte, err error) {
|
func (m *Request) Marshal() (dAtA []byte, err error) {
|
||||||
size := m.Size()
|
size := m.Size()
|
||||||
dAtA = make([]byte, size)
|
dAtA = make([]byte, size)
|
||||||
n, err := m.MarshalTo(dAtA)
|
n, err := m.MarshalToSizedBuffer(dAtA[:size])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -179,123 +167,133 @@ func (m *Request) Marshal() (dAtA []byte, err error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (m *Request) MarshalTo(dAtA []byte) (int, error) {
|
func (m *Request) MarshalTo(dAtA []byte) (int, error) {
|
||||||
var i int
|
size := m.Size()
|
||||||
|
return m.MarshalToSizedBuffer(dAtA[:size])
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Request) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
||||||
|
i := len(dAtA)
|
||||||
_ = i
|
_ = i
|
||||||
var l int
|
var l int
|
||||||
_ = l
|
_ = l
|
||||||
dAtA[i] = 0x8
|
if m.XXX_unrecognized != nil {
|
||||||
i++
|
i -= len(m.XXX_unrecognized)
|
||||||
i = encodeVarintEtcdserver(dAtA, i, uint64(m.ID))
|
copy(dAtA[i:], m.XXX_unrecognized)
|
||||||
dAtA[i] = 0x12
|
|
||||||
i++
|
|
||||||
i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.Method)))
|
|
||||||
i += copy(dAtA[i:], m.Method)
|
|
||||||
dAtA[i] = 0x1a
|
|
||||||
i++
|
|
||||||
i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.Path)))
|
|
||||||
i += copy(dAtA[i:], m.Path)
|
|
||||||
dAtA[i] = 0x22
|
|
||||||
i++
|
|
||||||
i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.Val)))
|
|
||||||
i += copy(dAtA[i:], m.Val)
|
|
||||||
dAtA[i] = 0x28
|
|
||||||
i++
|
|
||||||
if m.Dir {
|
|
||||||
dAtA[i] = 1
|
|
||||||
} else {
|
|
||||||
dAtA[i] = 0
|
|
||||||
}
|
}
|
||||||
i++
|
|
||||||
dAtA[i] = 0x32
|
|
||||||
i++
|
|
||||||
i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.PrevValue)))
|
|
||||||
i += copy(dAtA[i:], m.PrevValue)
|
|
||||||
dAtA[i] = 0x38
|
|
||||||
i++
|
|
||||||
i = encodeVarintEtcdserver(dAtA, i, uint64(m.PrevIndex))
|
|
||||||
if m.PrevExist != nil {
|
|
||||||
dAtA[i] = 0x40
|
|
||||||
i++
|
|
||||||
if *m.PrevExist {
|
|
||||||
dAtA[i] = 1
|
|
||||||
} else {
|
|
||||||
dAtA[i] = 0
|
|
||||||
}
|
|
||||||
i++
|
|
||||||
}
|
|
||||||
dAtA[i] = 0x48
|
|
||||||
i++
|
|
||||||
i = encodeVarintEtcdserver(dAtA, i, uint64(m.Expiration))
|
|
||||||
dAtA[i] = 0x50
|
|
||||||
i++
|
|
||||||
if m.Wait {
|
|
||||||
dAtA[i] = 1
|
|
||||||
} else {
|
|
||||||
dAtA[i] = 0
|
|
||||||
}
|
|
||||||
i++
|
|
||||||
dAtA[i] = 0x58
|
|
||||||
i++
|
|
||||||
i = encodeVarintEtcdserver(dAtA, i, uint64(m.Since))
|
|
||||||
dAtA[i] = 0x60
|
|
||||||
i++
|
|
||||||
if m.Recursive {
|
|
||||||
dAtA[i] = 1
|
|
||||||
} else {
|
|
||||||
dAtA[i] = 0
|
|
||||||
}
|
|
||||||
i++
|
|
||||||
dAtA[i] = 0x68
|
|
||||||
i++
|
|
||||||
if m.Sorted {
|
|
||||||
dAtA[i] = 1
|
|
||||||
} else {
|
|
||||||
dAtA[i] = 0
|
|
||||||
}
|
|
||||||
i++
|
|
||||||
dAtA[i] = 0x70
|
|
||||||
i++
|
|
||||||
if m.Quorum {
|
|
||||||
dAtA[i] = 1
|
|
||||||
} else {
|
|
||||||
dAtA[i] = 0
|
|
||||||
}
|
|
||||||
i++
|
|
||||||
dAtA[i] = 0x78
|
|
||||||
i++
|
|
||||||
i = encodeVarintEtcdserver(dAtA, i, uint64(m.Time))
|
|
||||||
dAtA[i] = 0x80
|
|
||||||
i++
|
|
||||||
dAtA[i] = 0x1
|
|
||||||
i++
|
|
||||||
if m.Stream {
|
|
||||||
dAtA[i] = 1
|
|
||||||
} else {
|
|
||||||
dAtA[i] = 0
|
|
||||||
}
|
|
||||||
i++
|
|
||||||
if m.Refresh != nil {
|
if m.Refresh != nil {
|
||||||
dAtA[i] = 0x88
|
i--
|
||||||
i++
|
|
||||||
dAtA[i] = 0x1
|
|
||||||
i++
|
|
||||||
if *m.Refresh {
|
if *m.Refresh {
|
||||||
dAtA[i] = 1
|
dAtA[i] = 1
|
||||||
} else {
|
} else {
|
||||||
dAtA[i] = 0
|
dAtA[i] = 0
|
||||||
}
|
}
|
||||||
i++
|
i--
|
||||||
|
dAtA[i] = 0x1
|
||||||
|
i--
|
||||||
|
dAtA[i] = 0x88
|
||||||
}
|
}
|
||||||
if m.XXX_unrecognized != nil {
|
i--
|
||||||
i += copy(dAtA[i:], m.XXX_unrecognized)
|
if m.Stream {
|
||||||
|
dAtA[i] = 1
|
||||||
|
} else {
|
||||||
|
dAtA[i] = 0
|
||||||
}
|
}
|
||||||
return i, nil
|
i--
|
||||||
|
dAtA[i] = 0x1
|
||||||
|
i--
|
||||||
|
dAtA[i] = 0x80
|
||||||
|
i = encodeVarintEtcdserver(dAtA, i, uint64(m.Time))
|
||||||
|
i--
|
||||||
|
dAtA[i] = 0x78
|
||||||
|
i--
|
||||||
|
if m.Quorum {
|
||||||
|
dAtA[i] = 1
|
||||||
|
} else {
|
||||||
|
dAtA[i] = 0
|
||||||
|
}
|
||||||
|
i--
|
||||||
|
dAtA[i] = 0x70
|
||||||
|
i--
|
||||||
|
if m.Sorted {
|
||||||
|
dAtA[i] = 1
|
||||||
|
} else {
|
||||||
|
dAtA[i] = 0
|
||||||
|
}
|
||||||
|
i--
|
||||||
|
dAtA[i] = 0x68
|
||||||
|
i--
|
||||||
|
if m.Recursive {
|
||||||
|
dAtA[i] = 1
|
||||||
|
} else {
|
||||||
|
dAtA[i] = 0
|
||||||
|
}
|
||||||
|
i--
|
||||||
|
dAtA[i] = 0x60
|
||||||
|
i = encodeVarintEtcdserver(dAtA, i, uint64(m.Since))
|
||||||
|
i--
|
||||||
|
dAtA[i] = 0x58
|
||||||
|
i--
|
||||||
|
if m.Wait {
|
||||||
|
dAtA[i] = 1
|
||||||
|
} else {
|
||||||
|
dAtA[i] = 0
|
||||||
|
}
|
||||||
|
i--
|
||||||
|
dAtA[i] = 0x50
|
||||||
|
i = encodeVarintEtcdserver(dAtA, i, uint64(m.Expiration))
|
||||||
|
i--
|
||||||
|
dAtA[i] = 0x48
|
||||||
|
if m.PrevExist != nil {
|
||||||
|
i--
|
||||||
|
if *m.PrevExist {
|
||||||
|
dAtA[i] = 1
|
||||||
|
} else {
|
||||||
|
dAtA[i] = 0
|
||||||
|
}
|
||||||
|
i--
|
||||||
|
dAtA[i] = 0x40
|
||||||
|
}
|
||||||
|
i = encodeVarintEtcdserver(dAtA, i, uint64(m.PrevIndex))
|
||||||
|
i--
|
||||||
|
dAtA[i] = 0x38
|
||||||
|
i -= len(m.PrevValue)
|
||||||
|
copy(dAtA[i:], m.PrevValue)
|
||||||
|
i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.PrevValue)))
|
||||||
|
i--
|
||||||
|
dAtA[i] = 0x32
|
||||||
|
i--
|
||||||
|
if m.Dir {
|
||||||
|
dAtA[i] = 1
|
||||||
|
} else {
|
||||||
|
dAtA[i] = 0
|
||||||
|
}
|
||||||
|
i--
|
||||||
|
dAtA[i] = 0x28
|
||||||
|
i -= len(m.Val)
|
||||||
|
copy(dAtA[i:], m.Val)
|
||||||
|
i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.Val)))
|
||||||
|
i--
|
||||||
|
dAtA[i] = 0x22
|
||||||
|
i -= len(m.Path)
|
||||||
|
copy(dAtA[i:], m.Path)
|
||||||
|
i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.Path)))
|
||||||
|
i--
|
||||||
|
dAtA[i] = 0x1a
|
||||||
|
i -= len(m.Method)
|
||||||
|
copy(dAtA[i:], m.Method)
|
||||||
|
i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.Method)))
|
||||||
|
i--
|
||||||
|
dAtA[i] = 0x12
|
||||||
|
i = encodeVarintEtcdserver(dAtA, i, uint64(m.ID))
|
||||||
|
i--
|
||||||
|
dAtA[i] = 0x8
|
||||||
|
return len(dAtA) - i, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *Metadata) Marshal() (dAtA []byte, err error) {
|
func (m *Metadata) Marshal() (dAtA []byte, err error) {
|
||||||
size := m.Size()
|
size := m.Size()
|
||||||
dAtA = make([]byte, size)
|
dAtA = make([]byte, size)
|
||||||
n, err := m.MarshalTo(dAtA)
|
n, err := m.MarshalToSizedBuffer(dAtA[:size])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -303,32 +301,43 @@ func (m *Metadata) Marshal() (dAtA []byte, err error) {
 }

 func (m *Metadata) MarshalTo(dAtA []byte) (int, error) {
-	var i int
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Metadata) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
 	_ = i
 	var l int
 	_ = l
-	dAtA[i] = 0x8
-	i++
-	i = encodeVarintEtcdserver(dAtA, i, uint64(m.NodeID))
-	dAtA[i] = 0x10
-	i++
-	i = encodeVarintEtcdserver(dAtA, i, uint64(m.ClusterID))
 	if m.XXX_unrecognized != nil {
-		i += copy(dAtA[i:], m.XXX_unrecognized)
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
 	}
-	return i, nil
+	i = encodeVarintEtcdserver(dAtA, i, uint64(m.ClusterID))
+	i--
+	dAtA[i] = 0x10
+	i = encodeVarintEtcdserver(dAtA, i, uint64(m.NodeID))
+	i--
+	dAtA[i] = 0x8
+	return len(dAtA) - i, nil
 }

 func encodeVarintEtcdserver(dAtA []byte, offset int, v uint64) int {
+	offset -= sovEtcdserver(v)
+	base := offset
 	for v >= 1<<7 {
 		dAtA[offset] = uint8(v&0x7f | 0x80)
 		v >>= 7
 		offset++
 	}
 	dAtA[offset] = uint8(v)
-	return offset + 1
+	return base
 }
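The regenerated marshaling code above fills the buffer from the end toward the start: MarshalToSizedBuffer begins at i = len(dAtA), each field is emitted value first and tag byte last, and encodeVarintEtcdserver now reserves sovEtcdserver(v) bytes below the current offset before writing. The following standalone sketch reproduces that pattern with made-up field values; it is an illustration of the scheme visible in the diff, not the vendored generated code.

package main

import "fmt"

// sov returns the number of bytes needed to varint-encode v (same quantity the
// regenerated code computes via math/bits).
func sov(v uint64) int {
	n := 1
	for v >= 1<<7 {
		v >>= 7
		n++
	}
	return n
}

// encodeVarint writes v immediately before offset and returns the new offset,
// mirroring the post-regeneration encodeVarintEtcdserver.
func encodeVarint(dAtA []byte, offset int, v uint64) int {
	offset -= sov(v)
	base := offset
	for v >= 1<<7 {
		dAtA[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	dAtA[offset] = uint8(v)
	return base
}

func main() {
	// Marshal two varint fields (tags 0x8 and 0x10) back to front, as the
	// generated MarshalToSizedBuffer does: last field first.
	buf := make([]byte, 16)
	i := len(buf)
	i = encodeVarint(buf, i, 300) // field 2 value
	i--
	buf[i] = 0x10
	i = encodeVarint(buf, i, 7) // field 1 value
	i--
	buf[i] = 0x8
	fmt.Printf("% x\n", buf[i:]) // 08 07 10 ac 02
}

One apparent benefit of writing back to front is that a nested message's length is known right after its bytes are written, so the length prefix can be emitted without a second traversal of the child.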
 func (m *Request) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	n += 1 + sovEtcdserver(uint64(m.ID))
@@ -363,6 +372,9 @@ func (m *Request) Size() (n int) {
 }

 func (m *Metadata) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	n += 1 + sovEtcdserver(uint64(m.NodeID))
@@ -374,14 +386,7 @@ func (m *Metadata) Size() (n int) {
 }

 func sovEtcdserver(x uint64) (n int) {
-	for {
-		n++
-		x >>= 7
-		if x == 0 {
-			break
-		}
-	}
-	return n
+	return (math_bits.Len64(x|1) + 6) / 7
 }
 func sozEtcdserver(x uint64) (n int) {
 	return sovEtcdserver(uint64((x << 1) ^ uint64((int64(x) >> 63))))
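sovEtcdserver now computes the varint width arithmetically: a 64-bit value x needs ceil(bitlen(x)/7) bytes, and the x|1 keeps the answer at one byte for x = 0. The same rewrite recurs in sovLease and the other regenerated files below. A quick standalone check of the formula against the old loop (illustrative only, not the vendored code):

package main

import (
	"fmt"
	"math/bits"
)

// sovLoop is the old loop-based size computation.
func sovLoop(x uint64) (n int) {
	for {
		n++
		x >>= 7
		if x == 0 {
			break
		}
	}
	return n
}

// sovBits is the regenerated formula: ceil(bit length / 7), with x|1 so that
// zero still takes one byte.
func sovBits(x uint64) int {
	return (bits.Len64(x|1) + 6) / 7
}

func main() {
	for _, x := range []uint64{0, 1, 127, 128, 16383, 16384, 1 << 63} {
		fmt.Println(x, sovLoop(x), sovBits(x)) // the two columns agree for every x
	}
}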
@@ -401,7 +406,7 @@ func (m *Request) Unmarshal(dAtA []byte) error {
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -429,7 +434,7 @@ func (m *Request) Unmarshal(dAtA []byte) error {
-			m.ID |= (uint64(b) & 0x7F) << shift
+			m.ID |= uint64(b&0x7F) << shift
@@ -448,7 +453,7 @@ func (m *Request) Unmarshal(dAtA []byte) error {
-			stringLen |= (uint64(b) & 0x7F) << shift
+			stringLen |= uint64(b&0x7F) << shift
@@ -458,6 +463,9 @@ func (m *Request) Unmarshal(dAtA []byte) error {
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthEtcdserver
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -477,7 +485,7 @@ func (m *Request) Unmarshal(dAtA []byte) error {
-			stringLen |= (uint64(b) & 0x7F) << shift
+			stringLen |= uint64(b&0x7F) << shift
@@ -487,6 +495,9 @@ func (m *Request) Unmarshal(dAtA []byte) error {
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthEtcdserver
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -506,7 +517,7 @@ func (m *Request) Unmarshal(dAtA []byte) error {
-			stringLen |= (uint64(b) & 0x7F) << shift
+			stringLen |= uint64(b&0x7F) << shift
@@ -516,6 +527,9 @@ func (m *Request) Unmarshal(dAtA []byte) error {
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthEtcdserver
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -535,7 +549,7 @@ func (m *Request) Unmarshal(dAtA []byte) error {
-			v |= (int(b) & 0x7F) << shift
+			v |= int(b&0x7F) << shift
@@ -555,7 +569,7 @@ func (m *Request) Unmarshal(dAtA []byte) error {
-			stringLen |= (uint64(b) & 0x7F) << shift
+			stringLen |= uint64(b&0x7F) << shift
@@ -565,6 +579,9 @@ func (m *Request) Unmarshal(dAtA []byte) error {
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthEtcdserver
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -584,7 +601,7 @@ func (m *Request) Unmarshal(dAtA []byte) error {
-			m.PrevIndex |= (uint64(b) & 0x7F) << shift
+			m.PrevIndex |= uint64(b&0x7F) << shift
@@ -603,7 +620,7 @@ func (m *Request) Unmarshal(dAtA []byte) error {
-			v |= (int(b) & 0x7F) << shift
+			v |= int(b&0x7F) << shift
@@ -624,7 +641,7 @@ func (m *Request) Unmarshal(dAtA []byte) error {
-			m.Expiration |= (int64(b) & 0x7F) << shift
+			m.Expiration |= int64(b&0x7F) << shift
@@ -643,7 +660,7 @@ func (m *Request) Unmarshal(dAtA []byte) error {
-			v |= (int(b) & 0x7F) << shift
+			v |= int(b&0x7F) << shift
@@ -663,7 +680,7 @@ func (m *Request) Unmarshal(dAtA []byte) error {
-			m.Since |= (uint64(b) & 0x7F) << shift
+			m.Since |= uint64(b&0x7F) << shift
@@ -682,7 +699,7 @@ func (m *Request) Unmarshal(dAtA []byte) error {
-			v |= (int(b) & 0x7F) << shift
+			v |= int(b&0x7F) << shift
@@ -702,7 +719,7 @@ func (m *Request) Unmarshal(dAtA []byte) error {
-			v |= (int(b) & 0x7F) << shift
+			v |= int(b&0x7F) << shift
@@ -722,7 +739,7 @@ func (m *Request) Unmarshal(dAtA []byte) error {
-			v |= (int(b) & 0x7F) << shift
+			v |= int(b&0x7F) << shift
@@ -742,7 +759,7 @@ func (m *Request) Unmarshal(dAtA []byte) error {
-			m.Time |= (int64(b) & 0x7F) << shift
+			m.Time |= int64(b&0x7F) << shift
@@ -761,7 +778,7 @@ func (m *Request) Unmarshal(dAtA []byte) error {
-			v |= (int(b) & 0x7F) << shift
+			v |= int(b&0x7F) << shift
@@ -781,7 +798,7 @@ func (m *Request) Unmarshal(dAtA []byte) error {
-			v |= (int(b) & 0x7F) << shift
+			v |= int(b&0x7F) << shift
@@ -797,6 +814,9 @@ func (m *Request) Unmarshal(dAtA []byte) error {
 			if skippy < 0 {
 				return ErrInvalidLengthEtcdserver
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthEtcdserver
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -825,7 +845,7 @@ func (m *Metadata) Unmarshal(dAtA []byte) error {
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
@@ -853,7 +873,7 @@ func (m *Metadata) Unmarshal(dAtA []byte) error {
-			m.NodeID |= (uint64(b) & 0x7F) << shift
+			m.NodeID |= uint64(b&0x7F) << shift
@@ -872,7 +892,7 @@ func (m *Metadata) Unmarshal(dAtA []byte) error {
-			m.ClusterID |= (uint64(b) & 0x7F) << shift
+			m.ClusterID |= uint64(b&0x7F) << shift
@@ -886,6 +906,9 @@ func (m *Metadata) Unmarshal(dAtA []byte) error {
 			if skippy < 0 {
 				return ErrInvalidLengthEtcdserver
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthEtcdserver
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -953,10 +976,13 @@ func skipEtcdserver(dAtA []byte) (n int, err error) {
 					break
 				}
 			}
-			iNdEx += length
 			if length < 0 {
 				return 0, ErrInvalidLengthEtcdserver
 			}
+			iNdEx += length
+			if iNdEx < 0 {
+				return 0, ErrInvalidLengthEtcdserver
+			}
 			return iNdEx, nil
 		case 3:
 			for {
@@ -985,6 +1011,9 @@ func skipEtcdserver(dAtA []byte) (n int, err error) {
 					return 0, err
 				}
 				iNdEx = start + next
+				if iNdEx < 0 {
+					return 0, ErrInvalidLengthEtcdserver
+				}
 			}
 			return iNdEx, nil
 		case 4:
@@ -1003,33 +1032,3 @@ var (
 	ErrInvalidLengthEtcdserver = fmt.Errorf("proto: negative length found during unmarshaling")
 	ErrIntOverflowEtcdserver   = fmt.Errorf("proto: integer overflow")
 )
-
-func init() { proto.RegisterFile("etcdserver.proto", fileDescriptorEtcdserver) }
-
-var fileDescriptorEtcdserver = []byte{
-	// 380 bytes of a gzipped FileDescriptorProto
-	[gzipped descriptor bytes]
-}
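The Unmarshal hunks above only reparenthesize the varint accumulation (uint64(b&0x7F) instead of (uint64(b) & 0x7F)) and add lower-bound checks on computed indexes; the decoding loop itself is unchanged. For reference, a minimal standalone decoder in the same style (illustrative only, not the vendored code):

package main

import (
	"errors"
	"fmt"
)

// decodeVarint mirrors the shift/accumulate loop used throughout the generated
// Unmarshal methods: x |= uint64(b&0x7F) << shift, stopping on a byte < 0x80.
func decodeVarint(dAtA []byte) (uint64, int, error) {
	var x uint64
	for shift, iNdEx := uint(0), 0; ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errors.New("integer overflow")
		}
		if iNdEx >= len(dAtA) {
			return 0, 0, errors.New("unexpected EOF")
		}
		b := dAtA[iNdEx]
		iNdEx++
		x |= uint64(b&0x7F) << shift
		if b < 0x80 {
			return x, iNdEx, nil
		}
	}
}

func main() {
	v, n, _ := decodeVarint([]byte{0xac, 0x02}) // 300 encoded as two bytes
	fmt.Println(v, n)                           // 300 2
}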
1322 vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.pb.go generated vendored (file diff suppressed because it is too large)
10283 vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.go generated vendored (file diff suppressed because it is too large)
22 vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.proto generated vendored
@@ -351,6 +351,9 @@ message ResponseHeader {
   // member_id is the ID of the member which sent the response.
   uint64 member_id = 2;
   // revision is the key-value store revision when the request was applied.
+  // For watch progress responses, the header.revision indicates progress. All future events
+  // recieved in this stream are guaranteed to have a higher revision number than the
+  // header.revision number.
   int64 revision = 3;
   // raft_term is the raft term when the request was applied.
   uint64 raft_term = 4;
@@ -639,6 +642,7 @@ message WatchRequest {
   oneof request_union {
     WatchCreateRequest create_request = 1;
     WatchCancelRequest cancel_request = 2;
+    WatchProgressRequest progress_request = 3;
   }
 }

@@ -671,6 +675,16 @@ message WatchCreateRequest {
   // If prev_kv is set, created watcher gets the previous KV before the event happens.
   // If the previous KV is already compacted, nothing will be returned.
   bool prev_kv = 6;
+
+  // If watch_id is provided and non-zero, it will be assigned to this watcher.
+  // Since creating a watcher in etcd is not a synchronous operation,
+  // this can be used ensure that ordering is correct when creating multiple
+  // watchers on the same stream. Creating a watcher with an ID already in
+  // use on the stream will cause an error to be returned.
+  int64 watch_id = 7;
+
+  // fragment enables splitting large revisions into multiple watch responses.
+  bool fragment = 8;
 }

 message WatchCancelRequest {
@@ -678,6 +692,11 @@ message WatchCancelRequest {
   int64 watch_id = 1;
 }

+// Requests the a watch stream progress status be sent in the watch response stream as soon as
+// possible.
+message WatchProgressRequest {
+}
+
 message WatchResponse {
   ResponseHeader header = 1;
   // watch_id is the ID of the watcher that corresponds to the response.
@@ -703,6 +722,9 @@ message WatchResponse {
   // cancel_reason indicates the reason for canceling the watcher.
   string cancel_reason = 6;

+  // framgment is true if large watch response was split over multiple responses.
+  bool fragment = 7;
+
   repeated mvccpb.Event events = 11;
 }
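The new WatchProgressRequest is an empty message selected through the request_union oneof. A client-side sketch of building such a request from the generated Go types follows; it assumes the usual gogoproto naming for the oneof wrapper (WatchRequest_ProgressRequest) and the RequestUnion field, neither of which is shown in this diff.

package main

import (
	"fmt"

	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
)

func main() {
	// Build a watch-stream message that asks the server for a progress report.
	// WatchRequest_ProgressRequest and RequestUnion are the names gogoproto is
	// expected to generate for the oneof above (assumption, see lead-in).
	req := &pb.WatchRequest{
		RequestUnion: &pb.WatchRequest_ProgressRequest{
			ProgressRequest: &pb.WatchProgressRequest{},
		},
	}
	fmt.Println(req.String())
	// On a live Watch stream this message would be sent with stream.Send(req);
	// the reply is a WatchResponse whose header.revision reflects progress.
}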
7 vendor/github.com/coreos/etcd/etcdserver/metrics.go generated vendored
@@ -54,6 +54,12 @@ var (
 		Name: "slow_apply_total",
 		Help: "The total number of slow apply requests (likely overloaded from slow disk).",
 	})
+	applySnapshotInProgress = prometheus.NewGauge(prometheus.GaugeOpts{
+		Namespace: "etcd",
+		Subsystem: "server",
+		Name:      "snapshot_apply_in_progress_total",
+		Help:      "1 if the server is applying the incoming snapshot. 0 if none.",
+	})
 	proposalsCommitted = prometheus.NewGauge(prometheus.GaugeOpts{
 		Namespace: "etcd",
 		Subsystem: "server",
@@ -131,6 +137,7 @@ func init() {
 	prometheus.MustRegister(leaderChanges)
 	prometheus.MustRegister(heartbeatSendFailures)
 	prometheus.MustRegister(slowApplies)
+	prometheus.MustRegister(applySnapshotInProgress)
 	prometheus.MustRegister(proposalsCommitted)
 	prometheus.MustRegister(proposalsApplied)
 	prometheus.MustRegister(proposalsPending)
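applySnapshotInProgress is a gauge meant to sit at 1 while a snapshot is being applied and 0 otherwise; the server.go hunk further down raises it before the apply and lowers it in a defer. A generic sketch of that gauge pattern with prometheus/client_golang (the applySnapshot function here is a stand-in, not the real EtcdServer method):

package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

var applyInProgress = prometheus.NewGauge(prometheus.GaugeOpts{
	Namespace: "etcd",
	Subsystem: "server",
	Name:      "snapshot_apply_in_progress_total",
	Help:      "1 if the server is applying the incoming snapshot. 0 if none.",
})

func init() {
	prometheus.MustRegister(applyInProgress)
}

// applySnapshot raises the gauge for the duration of the work and lowers it
// again when the work finishes, the same shape as the vendored change.
func applySnapshot() {
	applyInProgress.Inc()
	defer applyInProgress.Dec()
	time.Sleep(10 * time.Millisecond) // placeholder for the actual apply
}

func main() {
	applySnapshot()
}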
29 vendor/github.com/coreos/etcd/etcdserver/server.go generated vendored
@@ -198,7 +198,9 @@ type EtcdServer struct {
 	// stopping is closed by run goroutine on shutdown.
 	stopping chan struct{}
 	// done is closed when all goroutines from start() complete.
 	done chan struct{}
+	leaderChanged   chan struct{}
+	leaderChangedMu sync.RWMutex

 	errorc chan error
 	id     types.ID
@@ -597,6 +599,7 @@ func (s *EtcdServer) start() {
 	s.ctx, s.cancel = context.WithCancel(context.Background())
 	s.readwaitc = make(chan struct{}, 1)
 	s.readNotifier = newNotifier()
+	s.leaderChanged = make(chan struct{})
 	if s.ClusterVersion() != nil {
 		plog.Infof("starting server... [version: %v, cluster version: %v]", version.Version, version.Cluster(s.ClusterVersion().String()))
 	} else {
@@ -733,6 +736,17 @@ func (s *EtcdServer) run() {
 					s.compactor.Resume()
 				}
 			}
+			if newLeader {
+				select {
+				case s.leaderChanged <- struct{}{}:
+				default:
+				}
+				s.leaderChangedMu.Lock()
+				lc := s.leaderChanged
+				s.leaderChanged = make(chan struct{})
+				s.leaderChangedMu.Unlock()
+				close(lc)
+			}

 			// TODO: remove the nil checking
 			// current test utility does not provide the stats
@@ -841,6 +855,12 @@ func (s *EtcdServer) run() {
 	}
 }

+func (s *EtcdServer) leaderChangedNotify() <-chan struct{} {
+	s.leaderChangedMu.RLock()
+	defer s.leaderChangedMu.RUnlock()
+	return s.leaderChanged
+}
+
 func (s *EtcdServer) applyAll(ep *etcdProgress, apply *apply) {
 	s.applySnapshot(ep, apply)
 	s.applyEntries(ep, apply)
@@ -866,9 +886,12 @@ func (s *EtcdServer) applySnapshot(ep *etcdProgress, apply *apply) {
 	if raft.IsEmptySnap(apply.snapshot) {
 		return
 	}
+	applySnapshotInProgress.Inc()
 	plog.Infof("applying snapshot at index %d...", ep.snapi)
-	defer plog.Infof("finished applying incoming snapshot at index %d", ep.snapi)
+	defer func() {
+		plog.Infof("finished applying incoming snapshot at index %d", ep.snapi)
+		applySnapshotInProgress.Dec()
+	}()

 	if apply.snapshot.Metadata.Index <= ep.appliedi {
 		plog.Panicf("snapshot index [%d] should > appliedi[%d] + 1",
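The leaderChanged field works as a broadcast notifier: readers take the current channel under an RLock, and when leadership changes the server swaps in a fresh channel and closes the old one so every waiter wakes at once. A self-contained sketch of that close-and-replace pattern with hypothetical names (not etcd code):

package main

import (
	"fmt"
	"sync"
	"time"
)

// notifier broadcasts an event by closing the current channel and replacing it.
type notifier struct {
	mu sync.RWMutex
	ch chan struct{}
}

func newNotifier() *notifier {
	return &notifier{ch: make(chan struct{})}
}

// wait returns the channel that will be closed on the next notify.
func (n *notifier) wait() <-chan struct{} {
	n.mu.RLock()
	defer n.mu.RUnlock()
	return n.ch
}

// notify wakes every current waiter and resets the channel for future waiters.
func (n *notifier) notify() {
	n.mu.Lock()
	old := n.ch
	n.ch = make(chan struct{})
	n.mu.Unlock()
	close(old)
}

func main() {
	n := newNotifier()
	go func() {
		<-n.wait()
		fmt.Println("observed leader change")
	}()
	time.Sleep(10 * time.Millisecond)
	n.notify()
	time.Sleep(10 * time.Millisecond)
}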
9 vendor/github.com/coreos/etcd/etcdserver/v3_server.go generated vendored
@@ -614,7 +614,10 @@ func (s *EtcdServer) linearizableReadLoop() {
 		id1 := s.reqIDGen.Next()
 		binary.BigEndian.PutUint64(ctxToSend, id1)

+		leaderChangedNotifier := s.leaderChangedNotify()
 		select {
+		case <-leaderChangedNotifier:
+			continue
 		case <-s.readwaitc:
 		case <-s.stopping:
 			return
@@ -659,6 +662,12 @@ func (s *EtcdServer) linearizableReadLoop() {
 					slowReadIndex.Inc()
 				}

+			case <-leaderChangedNotifier:
+				timeout = true
+				readIndexFailed.Inc()
+				// return a retryable error.
+				nr.notify(ErrLeaderChanged)
+
 			case <-time.After(s.Cfg.ReqTimeout()):
 				plog.Warningf("timed out waiting for read index response (local node might have slow network)")
 				nr.notify(ErrTimeout)
1 vendor/github.com/coreos/etcd/integration/cluster.go generated vendored
@@ -647,6 +647,7 @@ func NewClientV3(m *member) (*clientv3.Client, error) {
 	cfg := clientv3.Config{
 		Endpoints:   []string{m.grpcAddr},
 		DialTimeout: 5 * time.Second,
+		DialOptions: []grpc.DialOption{grpc.WithBlock()},
 		MaxCallSendMsgSize: m.clientMaxCallSendMsgSize,
 		MaxCallRecvMsgSize: m.clientMaxCallRecvMsgSize,
 	}
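grpc.WithBlock() makes dialing synchronous, so NewClientV3 only returns once the connection to the member is actually established (or the dial times out). A minimal sketch of the option outside etcd, with a placeholder endpoint:

package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// WithBlock turns the normally asynchronous dial into a blocking one, so a
	// failure to reach the endpoint surfaces here instead of on the first RPC.
	conn, err := grpc.DialContext(ctx, "127.0.0.1:2379", // placeholder endpoint
		grpc.WithInsecure(),
		grpc.WithBlock(),
	)
	if err != nil {
		log.Fatalf("dial failed: %v", err)
	}
	defer conn.Close()
	log.Println("connected")
}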
360 vendor/github.com/coreos/etcd/lease/leasepb/lease.pb.go generated vendored
@@ -1,31 +1,17 @@
 // Code generated by protoc-gen-gogo. DO NOT EDIT.
 // source: lease.proto

-/*
-	Package leasepb is a generated protocol buffer package.
-
-	It is generated from these files:
-		lease.proto
-
-	It has these top-level messages:
-		Lease
-		LeaseInternalRequest
-		LeaseInternalResponse
-*/
 package leasepb

 import (
-	"fmt"
-
-	proto "github.com/golang/protobuf/proto"
-
-	math "math"
-
-	_ "github.com/gogo/protobuf/gogoproto"
-
-	etcdserverpb "github.com/coreos/etcd/etcdserver/etcdserverpb"
-
-	io "io"
+	fmt "fmt"
+	io "io"
+	math "math"
+	math_bits "math/bits"
+
+	etcdserverpb "github.com/coreos/etcd/etcdserver/etcdserverpb"
+	_ "github.com/gogo/protobuf/gogoproto"
+	proto "github.com/golang/protobuf/proto"
 )

 // Reference imports to suppress errors if they are not otherwise used.
@@ -40,42 +26,157 @@ var _ = math.Inf
 const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package

 type Lease struct {
 	ID  int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
 	TTL int64 `protobuf:"varint,2,opt,name=TTL,proto3" json:"TTL,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }

 func (m *Lease) Reset()         { *m = Lease{} }
 func (m *Lease) String() string { return proto.CompactTextString(m) }
 func (*Lease) ProtoMessage()    {}
-func (*Lease) Descriptor() ([]byte, []int) { return fileDescriptorLease, []int{0} }
+func (*Lease) Descriptor() ([]byte, []int) {
+	return fileDescriptor_3dd57e402472b33a, []int{0}
+}
+func (m *Lease) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Lease) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_Lease.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *Lease) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Lease.Merge(m, src)
+}
+func (m *Lease) XXX_Size() int {
+	return m.Size()
+}
+func (m *Lease) XXX_DiscardUnknown() {
+	xxx_messageInfo_Lease.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Lease proto.InternalMessageInfo

 type LeaseInternalRequest struct {
-	LeaseTimeToLiveRequest *etcdserverpb.LeaseTimeToLiveRequest `protobuf:"bytes,1,opt,name=LeaseTimeToLiveRequest" json:"LeaseTimeToLiveRequest,omitempty"`
+	LeaseTimeToLiveRequest *etcdserverpb.LeaseTimeToLiveRequest `protobuf:"bytes,1,opt,name=LeaseTimeToLiveRequest,proto3" json:"LeaseTimeToLiveRequest,omitempty"`
+	XXX_NoUnkeyedLiteral   struct{}                             `json:"-"`
+	XXX_unrecognized       []byte                               `json:"-"`
+	XXX_sizecache          int32                                `json:"-"`
 }

 func (m *LeaseInternalRequest) Reset()         { *m = LeaseInternalRequest{} }
 func (m *LeaseInternalRequest) String() string { return proto.CompactTextString(m) }
 func (*LeaseInternalRequest) ProtoMessage()    {}
-func (*LeaseInternalRequest) Descriptor() ([]byte, []int) { return fileDescriptorLease, []int{1} }
+func (*LeaseInternalRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_3dd57e402472b33a, []int{1}
+}
+func (m *LeaseInternalRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *LeaseInternalRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_LeaseInternalRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *LeaseInternalRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_LeaseInternalRequest.Merge(m, src)
+}
+func (m *LeaseInternalRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *LeaseInternalRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_LeaseInternalRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LeaseInternalRequest proto.InternalMessageInfo

 type LeaseInternalResponse struct {
-	LeaseTimeToLiveResponse *etcdserverpb.LeaseTimeToLiveResponse `protobuf:"bytes,1,opt,name=LeaseTimeToLiveResponse" json:"LeaseTimeToLiveResponse,omitempty"`
+	LeaseTimeToLiveResponse *etcdserverpb.LeaseTimeToLiveResponse `protobuf:"bytes,1,opt,name=LeaseTimeToLiveResponse,proto3" json:"LeaseTimeToLiveResponse,omitempty"`
+	XXX_NoUnkeyedLiteral    struct{}                              `json:"-"`
+	XXX_unrecognized        []byte                                `json:"-"`
+	XXX_sizecache           int32                                 `json:"-"`
 }

 func (m *LeaseInternalResponse) Reset()         { *m = LeaseInternalResponse{} }
 func (m *LeaseInternalResponse) String() string { return proto.CompactTextString(m) }
 func (*LeaseInternalResponse) ProtoMessage()    {}
-func (*LeaseInternalResponse) Descriptor() ([]byte, []int) { return fileDescriptorLease, []int{2} }
+func (*LeaseInternalResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_3dd57e402472b33a, []int{2}
+}
+func (m *LeaseInternalResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *LeaseInternalResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_LeaseInternalResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *LeaseInternalResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_LeaseInternalResponse.Merge(m, src)
+}
+func (m *LeaseInternalResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *LeaseInternalResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_LeaseInternalResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LeaseInternalResponse proto.InternalMessageInfo

 func init() {
 	proto.RegisterType((*Lease)(nil), "leasepb.Lease")
 	proto.RegisterType((*LeaseInternalRequest)(nil), "leasepb.LeaseInternalRequest")
 	proto.RegisterType((*LeaseInternalResponse)(nil), "leasepb.LeaseInternalResponse")
 }
+
+func init() { proto.RegisterFile("lease.proto", fileDescriptor_3dd57e402472b33a) }
+
+var fileDescriptor_3dd57e402472b33a = []byte{
+	// 233 bytes of a gzipped FileDescriptorProto
+	[gzipped descriptor bytes]
+}

 func (m *Lease) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
 	dAtA = make([]byte, size)
-	n, err := m.MarshalTo(dAtA)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
 	if err != nil {
 		return nil, err
 	}
@@ -83,27 +184,36 @@ func (m *Lease) Marshal() (dAtA []byte, err error) {
 }

 func (m *Lease) MarshalTo(dAtA []byte) (int, error) {
-	var i int
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Lease) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
 	_ = i
 	var l int
 	_ = l
-	if m.ID != 0 {
-		dAtA[i] = 0x8
-		i++
-		i = encodeVarintLease(dAtA, i, uint64(m.ID))
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
 	}
 	if m.TTL != 0 {
-		dAtA[i] = 0x10
-		i++
 		i = encodeVarintLease(dAtA, i, uint64(m.TTL))
+		i--
+		dAtA[i] = 0x10
 	}
-	return i, nil
+	if m.ID != 0 {
+		i = encodeVarintLease(dAtA, i, uint64(m.ID))
+		i--
+		dAtA[i] = 0x8
+	}
+	return len(dAtA) - i, nil
 }

 func (m *LeaseInternalRequest) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
 	dAtA = make([]byte, size)
-	n, err := m.MarshalTo(dAtA)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
 	if err != nil {
 		return nil, err
 	}
@@ -111,27 +221,38 @@ func (m *LeaseInternalRequest) Marshal() (dAtA []byte, err error) {
 }

 func (m *LeaseInternalRequest) MarshalTo(dAtA []byte) (int, error) {
-	var i int
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *LeaseInternalRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
 	_ = i
 	var l int
 	_ = l
-	if m.LeaseTimeToLiveRequest != nil {
-		dAtA[i] = 0xa
-		i++
-		i = encodeVarintLease(dAtA, i, uint64(m.LeaseTimeToLiveRequest.Size()))
-		n1, err := m.LeaseTimeToLiveRequest.MarshalTo(dAtA[i:])
-		if err != nil {
-			return 0, err
-		}
-		i += n1
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
 	}
-	return i, nil
+	if m.LeaseTimeToLiveRequest != nil {
+		{
+			size, err := m.LeaseTimeToLiveRequest.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintLease(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
 }

 func (m *LeaseInternalResponse) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
 	dAtA = make([]byte, size)
-	n, err := m.MarshalTo(dAtA)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
 	if err != nil {
 		return nil, err
 	}
@@ -139,33 +260,49 @@ func (m *LeaseInternalResponse) Marshal() (dAtA []byte, err error) {
 }

 func (m *LeaseInternalResponse) MarshalTo(dAtA []byte) (int, error) {
-	var i int
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *LeaseInternalResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
 	_ = i
 	var l int
 	_ = l
-	if m.LeaseTimeToLiveResponse != nil {
-		dAtA[i] = 0xa
-		i++
-		i = encodeVarintLease(dAtA, i, uint64(m.LeaseTimeToLiveResponse.Size()))
-		n2, err := m.LeaseTimeToLiveResponse.MarshalTo(dAtA[i:])
-		if err != nil {
-			return 0, err
-		}
-		i += n2
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
 	}
-	return i, nil
+	if m.LeaseTimeToLiveResponse != nil {
+		{
+			size, err := m.LeaseTimeToLiveResponse.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintLease(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
 }

 func encodeVarintLease(dAtA []byte, offset int, v uint64) int {
+	offset -= sovLease(v)
+	base := offset
 	for v >= 1<<7 {
 		dAtA[offset] = uint8(v&0x7f | 0x80)
 		v >>= 7
 		offset++
 	}
 	dAtA[offset] = uint8(v)
-	return offset + 1
+	return base
 }
 func (m *Lease) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	if m.ID != 0 {
@@ -174,38 +311,46 @@ func (m *Lease) Size() (n int) {
 	if m.TTL != 0 {
 		n += 1 + sovLease(uint64(m.TTL))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }

 func (m *LeaseInternalRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	if m.LeaseTimeToLiveRequest != nil {
 		l = m.LeaseTimeToLiveRequest.Size()
 		n += 1 + l + sovLease(uint64(l))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }

 func (m *LeaseInternalResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	if m.LeaseTimeToLiveResponse != nil {
 		l = m.LeaseTimeToLiveResponse.Size()
 		n += 1 + l + sovLease(uint64(l))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }

 func sovLease(x uint64) (n int) {
-	for {
-		n++
-		x >>= 7
-		if x == 0 {
-			break
-		}
-	}
-	return n
+	return (math_bits.Len64(x|1) + 6) / 7
 }
 func sozLease(x uint64) (n int) {
 	return sovLease(uint64((x << 1) ^ uint64((int64(x) >> 63))))
@@ -225,7 +370,7 @@ func (m *Lease) Unmarshal(dAtA []byte) error {
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
@@ -253,7 +398,7 @@ func (m *Lease) Unmarshal(dAtA []byte) error {
-			m.ID |= (int64(b) & 0x7F) << shift
+			m.ID |= int64(b&0x7F) << shift
@@ -272,7 +417,7 @@ func (m *Lease) Unmarshal(dAtA []byte) error {
-			m.TTL |= (int64(b) & 0x7F) << shift
+			m.TTL |= int64(b&0x7F) << shift
@@ -286,9 +431,13 @@ func (m *Lease) Unmarshal(dAtA []byte) error {
 			if skippy < 0 {
 				return ErrInvalidLengthLease
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthLease
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -313,7 +462,7 @@ func (m *LeaseInternalRequest) Unmarshal(dAtA []byte) error {
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
@@ -341,7 +490,7 @@ func (m *LeaseInternalRequest) Unmarshal(dAtA []byte) error {
-			msglen |= (int(b) & 0x7F) << shift
+			msglen |= int(b&0x7F) << shift
@@ -350,6 +499,9 @@ func (m *LeaseInternalRequest) Unmarshal(dAtA []byte) error {
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthLease
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -369,9 +521,13 @@ func (m *LeaseInternalRequest) Unmarshal(dAtA []byte) error {
 			if skippy < 0 {
 				return ErrInvalidLengthLease
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthLease
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -396,7 +552,7 @@ func (m *LeaseInternalResponse) Unmarshal(dAtA []byte) error {
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
@@ -424,7 +580,7 @@ func (m *LeaseInternalResponse) Unmarshal(dAtA []byte) error {
-			msglen |= (int(b) & 0x7F) << shift
+			msglen |= int(b&0x7F) << shift
@@ -433,6 +589,9 @@ func (m *LeaseInternalResponse) Unmarshal(dAtA []byte) error {
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthLease
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -452,9 +611,13 @@ func (m *LeaseInternalResponse) Unmarshal(dAtA []byte) error {
 			if skippy < 0 {
 				return ErrInvalidLengthLease
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthLease
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -518,10 +681,13 @@ func skipLease(dAtA []byte) (n int, err error) {
 					break
 				}
 			}
-			iNdEx += length
 			if length < 0 {
 				return 0, ErrInvalidLengthLease
 			}
+			iNdEx += length
+			if iNdEx < 0 {
+				return 0, ErrInvalidLengthLease
+			}
 			return iNdEx, nil
 		case 3:
 			for {
@@ -550,6 +716,9 @@ func skipLease(dAtA []byte) (n int, err error) {
 					return 0, err
 				}
 				iNdEx = start + next
+				if iNdEx < 0 {
+					return 0, ErrInvalidLengthLease
+				}
 			}
 			return iNdEx, nil
 		case 4:
@@ -568,24 +737,3 @@ var (
 	ErrInvalidLengthLease = fmt.Errorf("proto: negative length found during unmarshaling")
 	ErrIntOverflowLease   = fmt.Errorf("proto: integer overflow")
 )
-
-func init() { proto.RegisterFile("lease.proto", fileDescriptorLease) }
-
-var fileDescriptorLease = []byte{
-	// 233 bytes of a gzipped FileDescriptorProto
-	[gzipped descriptor bytes]
-}
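The LeaseInternalRequest/Response marshalers above show the nested-message half of the back-to-front scheme: the child message is serialized into dAtA[:i], then the parent writes the length varint and finally the 0xa tag. A standalone sketch of that ordering with toy types (not the generated lease code):

package main

import "fmt"

// encodeVarint prepends v before offset, as in the regenerated encodeVarintLease.
func encodeVarint(dAtA []byte, offset int, v uint64) int {
	n := 1
	for x := v; x >= 1<<7; x >>= 7 {
		n++
	}
	offset -= n
	base := offset
	for v >= 1<<7 {
		dAtA[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	dAtA[offset] = uint8(v)
	return base
}

// childMarshalToSizedBuffer writes a two-byte pretend child message backwards
// into dAtA and reports how many bytes it used, mirroring a generated
// MarshalToSizedBuffer.
func childMarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	i--
	dAtA[i] = 0x2a // pretend payload
	i--
	dAtA[i] = 0x08 // pretend tag
	return len(dAtA) - i, nil
}

func main() {
	buf := make([]byte, 16)
	i := len(buf)
	// Parent embeds the child as field 1 (tag byte 0xa), in exactly the order
	// the regenerated code uses: child bytes first, then length, then tag.
	size, err := childMarshalToSizedBuffer(buf[:i])
	if err != nil {
		panic(err)
	}
	i -= size
	i = encodeVarint(buf, i, uint64(size))
	i--
	buf[i] = 0xa
	fmt.Printf("% x\n", buf[i:]) // 0a 02 08 2a
}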
372
vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.pb.go
generated
vendored
372
vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.pb.go
generated
vendored
@@ -1,28 +1,16 @@
|
|||||||
// Code generated by protoc-gen-gogo. DO NOT EDIT.
|
// Code generated by protoc-gen-gogo. DO NOT EDIT.
|
||||||
// source: kv.proto
|
// source: kv.proto
|
||||||
|
|
||||||
/*
|
|
||||||
Package mvccpb is a generated protocol buffer package.
|
|
||||||
|
|
||||||
It is generated from these files:
|
|
||||||
kv.proto
|
|
||||||
|
|
||||||
It has these top-level messages:
|
|
||||||
KeyValue
|
|
||||||
Event
|
|
||||||
*/
|
|
||||||
package mvccpb
|
package mvccpb
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
fmt "fmt"
|
||||||
|
io "io"
|
||||||
proto "github.com/golang/protobuf/proto"
|
|
||||||
|
|
||||||
math "math"
|
math "math"
|
||||||
|
math_bits "math/bits"
|
||||||
|
|
||||||
_ "github.com/gogo/protobuf/gogoproto"
|
_ "github.com/gogo/protobuf/gogoproto"
|
||||||
|
proto "github.com/golang/protobuf/proto"
|
||||||
io "io"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// Reference imports to suppress errors if they are not otherwise used.
|
// Reference imports to suppress errors if they are not otherwise used.
|
||||||
@@ -47,6 +35,7 @@ var Event_EventType_name = map[int32]string{
|
|||||||
0: "PUT",
|
0: "PUT",
|
||||||
1: "DELETE",
|
1: "DELETE",
|
||||||
}
|
}
|
||||||
|
|
||||||
var Event_EventType_value = map[string]int32{
|
var Event_EventType_value = map[string]int32{
|
||||||
"PUT": 0,
|
"PUT": 0,
|
||||||
"DELETE": 1,
|
"DELETE": 1,
|
||||||
@@ -55,7 +44,10 @@ var Event_EventType_value = map[string]int32{
|
|||||||
func (x Event_EventType) String() string {
|
func (x Event_EventType) String() string {
|
||||||
return proto.EnumName(Event_EventType_name, int32(x))
|
return proto.EnumName(Event_EventType_name, int32(x))
|
||||||
}
|
}
|
||||||
func (Event_EventType) EnumDescriptor() ([]byte, []int) { return fileDescriptorKv, []int{1, 0} }
|
|
||||||
|
func (Event_EventType) EnumDescriptor() ([]byte, []int) {
|
||||||
|
return fileDescriptor_2216fe83c9c12408, []int{1, 0}
|
||||||
|
}
|
||||||
|
|
||||||
type KeyValue struct {
|
type KeyValue struct {
|
||||||
// key is the key in bytes. An empty key is not allowed.
|
// key is the key in bytes. An empty key is not allowed.
|
||||||
@@ -73,13 +65,44 @@ type KeyValue struct {
|
|||||||
// lease is the ID of the lease that attached to key.
|
// lease is the ID of the lease that attached to key.
|
||||||
// When the attached lease expires, the key will be deleted.
|
// When the attached lease expires, the key will be deleted.
|
||||||
// If lease is 0, then no lease is attached to the key.
|
// If lease is 0, then no lease is attached to the key.
|
||||||
Lease int64 `protobuf:"varint,6,opt,name=lease,proto3" json:"lease,omitempty"`
|
Lease int64 `protobuf:"varint,6,opt,name=lease,proto3" json:"lease,omitempty"`
|
||||||
|
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||||
|
XXX_unrecognized []byte `json:"-"`
|
||||||
|
XXX_sizecache int32 `json:"-"`
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *KeyValue) Reset() { *m = KeyValue{} }
|
func (m *KeyValue) Reset() { *m = KeyValue{} }
|
||||||
func (m *KeyValue) String() string { return proto.CompactTextString(m) }
|
func (m *KeyValue) String() string { return proto.CompactTextString(m) }
|
||||||
func (*KeyValue) ProtoMessage() {}
|
func (*KeyValue) ProtoMessage() {}
|
||||||
func (*KeyValue) Descriptor() ([]byte, []int) { return fileDescriptorKv, []int{0} }
|
func (*KeyValue) Descriptor() ([]byte, []int) {
|
||||||
|
return fileDescriptor_2216fe83c9c12408, []int{0}
|
||||||
|
}
|
||||||
|
func (m *KeyValue) XXX_Unmarshal(b []byte) error {
|
||||||
|
return m.Unmarshal(b)
|
||||||
|
}
|
||||||
|
func (m *KeyValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||||
|
if deterministic {
|
||||||
|
return xxx_messageInfo_KeyValue.Marshal(b, m, deterministic)
|
||||||
|
} else {
|
||||||
|
b = b[:cap(b)]
|
||||||
|
n, err := m.MarshalToSizedBuffer(b)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return b[:n], nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
func (m *KeyValue) XXX_Merge(src proto.Message) {
|
||||||
|
xxx_messageInfo_KeyValue.Merge(m, src)
|
||||||
|
}
|
||||||
|
func (m *KeyValue) XXX_Size() int {
|
||||||
|
return m.Size()
|
||||||
|
}
|
||||||
|
func (m *KeyValue) XXX_DiscardUnknown() {
|
||||||
|
xxx_messageInfo_KeyValue.DiscardUnknown(m)
|
||||||
|
}
|
||||||
|
|
||||||
|
var xxx_messageInfo_KeyValue proto.InternalMessageInfo
|
||||||
|
|
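The regenerated KeyValue gains the standard gogo/protobuf XXX_* bookkeeping fields and helpers, but the surface callers rely on is unchanged: Marshal and Unmarshal keep their signatures and now route through MarshalToSizedBuffer. A small round-trip sketch, assuming the vendored path github.com/coreos/etcd/mvcc/mvccpb is importable from the caller's module:

```go
package main

import (
	"fmt"

	"github.com/coreos/etcd/mvcc/mvccpb" // assumed importable path for this vendored copy
)

func main() {
	kv := &mvccpb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), Lease: 1}

	// Marshal sizes the message once, then fills the buffer back-to-front
	// through the generated MarshalToSizedBuffer shown in this diff.
	b, err := kv.Marshal()
	if err != nil {
		panic(err)
	}
	fmt.Printf("encoded: % x\n", b)

	var out mvccpb.KeyValue
	if err := out.Unmarshal(b); err != nil {
		panic(err)
	}
	fmt.Println(string(out.Key), string(out.Value), out.Lease)
}
```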
||||||
type Event struct {
|
type Event struct {
|
||||||
// type is the kind of event. If type is a PUT, it indicates
|
// type is the kind of event. If type is a PUT, it indicates
|
||||||
@@ -91,25 +114,82 @@ type Event struct {
|
|||||||
// A PUT event with kv.Version=1 indicates the creation of a key.
|
// A PUT event with kv.Version=1 indicates the creation of a key.
|
||||||
// A DELETE/EXPIRE event contains the deleted key with
|
// A DELETE/EXPIRE event contains the deleted key with
|
||||||
// its modification revision set to the revision of deletion.
|
// its modification revision set to the revision of deletion.
|
||||||
Kv *KeyValue `protobuf:"bytes,2,opt,name=kv" json:"kv,omitempty"`
|
Kv *KeyValue `protobuf:"bytes,2,opt,name=kv,proto3" json:"kv,omitempty"`
|
||||||
// prev_kv holds the key-value pair before the event happens.
|
// prev_kv holds the key-value pair before the event happens.
|
||||||
PrevKv *KeyValue `protobuf:"bytes,3,opt,name=prev_kv,json=prevKv" json:"prev_kv,omitempty"`
|
PrevKv *KeyValue `protobuf:"bytes,3,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"`
|
||||||
|
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||||
|
XXX_unrecognized []byte `json:"-"`
|
||||||
|
XXX_sizecache int32 `json:"-"`
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *Event) Reset() { *m = Event{} }
|
func (m *Event) Reset() { *m = Event{} }
|
||||||
func (m *Event) String() string { return proto.CompactTextString(m) }
|
func (m *Event) String() string { return proto.CompactTextString(m) }
|
||||||
func (*Event) ProtoMessage() {}
|
func (*Event) ProtoMessage() {}
|
||||||
func (*Event) Descriptor() ([]byte, []int) { return fileDescriptorKv, []int{1} }
|
func (*Event) Descriptor() ([]byte, []int) {
|
||||||
|
return fileDescriptor_2216fe83c9c12408, []int{1}
|
||||||
|
}
|
||||||
|
func (m *Event) XXX_Unmarshal(b []byte) error {
|
||||||
|
return m.Unmarshal(b)
|
||||||
|
}
|
||||||
|
func (m *Event) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||||
|
if deterministic {
|
||||||
|
return xxx_messageInfo_Event.Marshal(b, m, deterministic)
|
||||||
|
} else {
|
||||||
|
b = b[:cap(b)]
|
||||||
|
n, err := m.MarshalToSizedBuffer(b)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return b[:n], nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
func (m *Event) XXX_Merge(src proto.Message) {
|
||||||
|
xxx_messageInfo_Event.Merge(m, src)
|
||||||
|
}
|
||||||
|
func (m *Event) XXX_Size() int {
|
||||||
|
return m.Size()
|
||||||
|
}
|
||||||
|
func (m *Event) XXX_DiscardUnknown() {
|
||||||
|
xxx_messageInfo_Event.DiscardUnknown(m)
|
||||||
|
}
|
||||||
|
|
||||||
|
var xxx_messageInfo_Event proto.InternalMessageInfo
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
|
proto.RegisterEnum("mvccpb.Event_EventType", Event_EventType_name, Event_EventType_value)
|
||||||
proto.RegisterType((*KeyValue)(nil), "mvccpb.KeyValue")
|
proto.RegisterType((*KeyValue)(nil), "mvccpb.KeyValue")
|
||||||
proto.RegisterType((*Event)(nil), "mvccpb.Event")
|
proto.RegisterType((*Event)(nil), "mvccpb.Event")
|
||||||
proto.RegisterEnum("mvccpb.Event_EventType", Event_EventType_name, Event_EventType_value)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func init() { proto.RegisterFile("kv.proto", fileDescriptor_2216fe83c9c12408) }
|
||||||
|
|
||||||
|
var fileDescriptor_2216fe83c9c12408 = []byte{
|
||||||
|
// 303 bytes of a gzipped FileDescriptorProto
|
||||||
|
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0x41, 0x4e, 0xc2, 0x40,
|
||||||
|
0x14, 0x86, 0x3b, 0x14, 0x0a, 0x3e, 0x08, 0x36, 0x13, 0x12, 0x27, 0x2e, 0x26, 0x95, 0x8d, 0x18,
|
||||||
|
0x13, 0x4c, 0xf0, 0x06, 0xc6, 0xae, 0x70, 0x61, 0x1a, 0x74, 0x4b, 0x4a, 0x79, 0x21, 0xa4, 0x94,
|
||||||
|
0x69, 0x4a, 0x9d, 0xa4, 0x37, 0x71, 0xef, 0xde, 0x73, 0xb0, 0xe4, 0x08, 0x52, 0x2f, 0x62, 0xfa,
|
||||||
|
0xc6, 0xe2, 0xc6, 0xcd, 0xe4, 0xfd, 0xff, 0xff, 0x65, 0xe6, 0x7f, 0x03, 0x9d, 0x58, 0x8f, 0xd3,
|
||||||
|
0x4c, 0xe5, 0x8a, 0x3b, 0x89, 0x8e, 0xa2, 0x74, 0x71, 0x39, 0x58, 0xa9, 0x95, 0x22, 0xeb, 0xae,
|
||||||
|
0x9a, 0x4c, 0x3a, 0xfc, 0x64, 0xd0, 0x99, 0x62, 0xf1, 0x1a, 0x6e, 0xde, 0x90, 0xbb, 0x60, 0xc7,
|
||||||
|
0x58, 0x08, 0xe6, 0xb1, 0x51, 0x2f, 0xa8, 0x46, 0x7e, 0x0d, 0xe7, 0x51, 0x86, 0x61, 0x8e, 0xf3,
|
||||||
|
0x0c, 0xf5, 0x7a, 0xb7, 0x56, 0x5b, 0xd1, 0xf0, 0xd8, 0xc8, 0x0e, 0xfa, 0xc6, 0x0e, 0x7e, 0x5d,
|
||||||
|
0x7e, 0x05, 0xbd, 0x44, 0x2d, 0xff, 0x28, 0x9b, 0xa8, 0x6e, 0xa2, 0x96, 0x27, 0x44, 0x40, 0x5b,
|
||||||
|
0x63, 0x46, 0x69, 0x93, 0xd2, 0x5a, 0xf2, 0x01, 0xb4, 0x74, 0x55, 0x40, 0xb4, 0xe8, 0x65, 0x23,
|
||||||
|
0x2a, 0x77, 0x83, 0xe1, 0x0e, 0x85, 0x43, 0xb4, 0x11, 0xc3, 0x0f, 0x06, 0x2d, 0x5f, 0xe3, 0x36,
|
||||||
|
0xe7, 0xb7, 0xd0, 0xcc, 0x8b, 0x14, 0xa9, 0x6e, 0x7f, 0x72, 0x31, 0x36, 0x7b, 0x8e, 0x29, 0x34,
|
||||||
|
0xe7, 0xac, 0x48, 0x31, 0x20, 0x88, 0x7b, 0xd0, 0x88, 0x35, 0x75, 0xef, 0x4e, 0xdc, 0x1a, 0xad,
|
||||||
|
0x17, 0x0f, 0x1a, 0xb1, 0xe6, 0x37, 0xd0, 0x4e, 0x33, 0xd4, 0xf3, 0x58, 0x53, 0xf9, 0xff, 0x30,
|
||||||
|
0xa7, 0x02, 0xa6, 0x7a, 0xe8, 0xc1, 0xd9, 0xe9, 0x7e, 0xde, 0x06, 0xfb, 0xf9, 0x65, 0xe6, 0x5a,
|
||||||
|
0x1c, 0xc0, 0x79, 0xf4, 0x9f, 0xfc, 0x99, 0xef, 0xb2, 0x07, 0xb1, 0x3f, 0x4a, 0xeb, 0x70, 0x94,
|
||||||
|
0xd6, 0xbe, 0x94, 0xec, 0x50, 0x4a, 0xf6, 0x55, 0x4a, 0xf6, 0xfe, 0x2d, 0xad, 0x85, 0x43, 0xff,
|
||||||
|
0x7e, 0xff, 0x13, 0x00, 0x00, 0xff, 0xff, 0xb5, 0x45, 0x92, 0x5d, 0xa1, 0x01, 0x00, 0x00,
|
||||||
|
}
|
||||||
|
|
||||||
func (m *KeyValue) Marshal() (dAtA []byte, err error) {
|
func (m *KeyValue) Marshal() (dAtA []byte, err error) {
|
||||||
size := m.Size()
|
size := m.Size()
|
||||||
dAtA = make([]byte, size)
|
dAtA = make([]byte, size)
|
||||||
n, err := m.MarshalTo(dAtA)
|
n, err := m.MarshalToSizedBuffer(dAtA[:size])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -117,49 +197,60 @@ func (m *KeyValue) Marshal() (dAtA []byte, err error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (m *KeyValue) MarshalTo(dAtA []byte) (int, error) {
|
func (m *KeyValue) MarshalTo(dAtA []byte) (int, error) {
|
||||||
var i int
|
size := m.Size()
|
||||||
|
return m.MarshalToSizedBuffer(dAtA[:size])
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *KeyValue) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
||||||
|
i := len(dAtA)
|
||||||
_ = i
|
_ = i
|
||||||
var l int
|
var l int
|
||||||
_ = l
|
_ = l
|
||||||
if len(m.Key) > 0 {
|
if m.XXX_unrecognized != nil {
|
||||||
dAtA[i] = 0xa
|
i -= len(m.XXX_unrecognized)
|
||||||
i++
|
copy(dAtA[i:], m.XXX_unrecognized)
|
||||||
i = encodeVarintKv(dAtA, i, uint64(len(m.Key)))
|
|
||||||
i += copy(dAtA[i:], m.Key)
|
|
||||||
}
|
|
||||||
if m.CreateRevision != 0 {
|
|
||||||
dAtA[i] = 0x10
|
|
||||||
i++
|
|
||||||
i = encodeVarintKv(dAtA, i, uint64(m.CreateRevision))
|
|
||||||
}
|
|
||||||
if m.ModRevision != 0 {
|
|
||||||
dAtA[i] = 0x18
|
|
||||||
i++
|
|
||||||
i = encodeVarintKv(dAtA, i, uint64(m.ModRevision))
|
|
||||||
}
|
|
||||||
if m.Version != 0 {
|
|
||||||
dAtA[i] = 0x20
|
|
||||||
i++
|
|
||||||
i = encodeVarintKv(dAtA, i, uint64(m.Version))
|
|
||||||
}
|
|
||||||
if len(m.Value) > 0 {
|
|
||||||
dAtA[i] = 0x2a
|
|
||||||
i++
|
|
||||||
i = encodeVarintKv(dAtA, i, uint64(len(m.Value)))
|
|
||||||
i += copy(dAtA[i:], m.Value)
|
|
||||||
}
|
}
|
||||||
if m.Lease != 0 {
|
if m.Lease != 0 {
|
||||||
dAtA[i] = 0x30
|
|
||||||
i++
|
|
||||||
i = encodeVarintKv(dAtA, i, uint64(m.Lease))
|
i = encodeVarintKv(dAtA, i, uint64(m.Lease))
|
||||||
|
i--
|
||||||
|
dAtA[i] = 0x30
|
||||||
}
|
}
|
||||||
return i, nil
|
if len(m.Value) > 0 {
|
||||||
|
i -= len(m.Value)
|
||||||
|
copy(dAtA[i:], m.Value)
|
||||||
|
i = encodeVarintKv(dAtA, i, uint64(len(m.Value)))
|
||||||
|
i--
|
||||||
|
dAtA[i] = 0x2a
|
||||||
|
}
|
||||||
|
if m.Version != 0 {
|
||||||
|
i = encodeVarintKv(dAtA, i, uint64(m.Version))
|
||||||
|
i--
|
||||||
|
dAtA[i] = 0x20
|
||||||
|
}
|
||||||
|
if m.ModRevision != 0 {
|
||||||
|
i = encodeVarintKv(dAtA, i, uint64(m.ModRevision))
|
||||||
|
i--
|
||||||
|
dAtA[i] = 0x18
|
||||||
|
}
|
||||||
|
if m.CreateRevision != 0 {
|
||||||
|
i = encodeVarintKv(dAtA, i, uint64(m.CreateRevision))
|
||||||
|
i--
|
||||||
|
dAtA[i] = 0x10
|
||||||
|
}
|
||||||
|
if len(m.Key) > 0 {
|
||||||
|
i -= len(m.Key)
|
||||||
|
copy(dAtA[i:], m.Key)
|
||||||
|
i = encodeVarintKv(dAtA, i, uint64(len(m.Key)))
|
||||||
|
i--
|
||||||
|
dAtA[i] = 0xa
|
||||||
|
}
|
||||||
|
return len(dAtA) - i, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *Event) Marshal() (dAtA []byte, err error) {
|
func (m *Event) Marshal() (dAtA []byte, err error) {
|
||||||
size := m.Size()
|
size := m.Size()
|
||||||
dAtA = make([]byte, size)
|
dAtA = make([]byte, size)
|
||||||
n, err := m.MarshalTo(dAtA)
|
n, err := m.MarshalToSizedBuffer(dAtA[:size])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -167,48 +258,66 @@ func (m *Event) Marshal() (dAtA []byte, err error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (m *Event) MarshalTo(dAtA []byte) (int, error) {
|
func (m *Event) MarshalTo(dAtA []byte) (int, error) {
|
||||||
var i int
|
size := m.Size()
|
||||||
|
return m.MarshalToSizedBuffer(dAtA[:size])
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Event) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
||||||
|
i := len(dAtA)
|
||||||
_ = i
|
_ = i
|
||||||
var l int
|
var l int
|
||||||
_ = l
|
_ = l
|
||||||
if m.Type != 0 {
|
if m.XXX_unrecognized != nil {
|
||||||
dAtA[i] = 0x8
|
i -= len(m.XXX_unrecognized)
|
||||||
i++
|
copy(dAtA[i:], m.XXX_unrecognized)
|
||||||
i = encodeVarintKv(dAtA, i, uint64(m.Type))
|
|
||||||
}
|
|
||||||
if m.Kv != nil {
|
|
||||||
dAtA[i] = 0x12
|
|
||||||
i++
|
|
||||||
i = encodeVarintKv(dAtA, i, uint64(m.Kv.Size()))
|
|
||||||
n1, err := m.Kv.MarshalTo(dAtA[i:])
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
i += n1
|
|
||||||
}
|
}
|
||||||
if m.PrevKv != nil {
|
if m.PrevKv != nil {
|
||||||
dAtA[i] = 0x1a
|
{
|
||||||
i++
|
size, err := m.PrevKv.MarshalToSizedBuffer(dAtA[:i])
|
||||||
i = encodeVarintKv(dAtA, i, uint64(m.PrevKv.Size()))
|
if err != nil {
|
||||||
n2, err := m.PrevKv.MarshalTo(dAtA[i:])
|
return 0, err
|
||||||
if err != nil {
|
}
|
||||||
return 0, err
|
i -= size
|
||||||
|
i = encodeVarintKv(dAtA, i, uint64(size))
|
||||||
}
|
}
|
||||||
i += n2
|
i--
|
||||||
|
dAtA[i] = 0x1a
|
||||||
}
|
}
|
||||||
return i, nil
|
if m.Kv != nil {
|
||||||
|
{
|
||||||
|
size, err := m.Kv.MarshalToSizedBuffer(dAtA[:i])
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
i -= size
|
||||||
|
i = encodeVarintKv(dAtA, i, uint64(size))
|
||||||
|
}
|
||||||
|
i--
|
||||||
|
dAtA[i] = 0x12
|
||||||
|
}
|
||||||
|
if m.Type != 0 {
|
||||||
|
i = encodeVarintKv(dAtA, i, uint64(m.Type))
|
||||||
|
i--
|
||||||
|
dAtA[i] = 0x8
|
||||||
|
}
|
||||||
|
return len(dAtA) - i, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func encodeVarintKv(dAtA []byte, offset int, v uint64) int {
|
func encodeVarintKv(dAtA []byte, offset int, v uint64) int {
|
||||||
|
offset -= sovKv(v)
|
||||||
|
base := offset
|
||||||
for v >= 1<<7 {
|
for v >= 1<<7 {
|
||||||
dAtA[offset] = uint8(v&0x7f | 0x80)
|
dAtA[offset] = uint8(v&0x7f | 0x80)
|
||||||
v >>= 7
|
v >>= 7
|
||||||
offset++
|
offset++
|
||||||
}
|
}
|
||||||
dAtA[offset] = uint8(v)
|
dAtA[offset] = uint8(v)
|
||||||
return offset + 1
|
return base
|
||||||
}
|
}
|
||||||
func (m *KeyValue) Size() (n int) {
|
func (m *KeyValue) Size() (n int) {
|
||||||
|
if m == nil {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
var l int
|
var l int
|
||||||
_ = l
|
_ = l
|
||||||
l = len(m.Key)
|
l = len(m.Key)
|
||||||
@@ -231,10 +340,16 @@ func (m *KeyValue) Size() (n int) {
|
|||||||
if m.Lease != 0 {
|
if m.Lease != 0 {
|
||||||
n += 1 + sovKv(uint64(m.Lease))
|
n += 1 + sovKv(uint64(m.Lease))
|
||||||
}
|
}
|
||||||
|
if m.XXX_unrecognized != nil {
|
||||||
|
n += len(m.XXX_unrecognized)
|
||||||
|
}
|
||||||
return n
|
return n
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *Event) Size() (n int) {
|
func (m *Event) Size() (n int) {
|
||||||
|
if m == nil {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
var l int
|
var l int
|
||||||
_ = l
|
_ = l
|
||||||
if m.Type != 0 {
|
if m.Type != 0 {
|
||||||
@@ -248,18 +363,14 @@ func (m *Event) Size() (n int) {
|
|||||||
l = m.PrevKv.Size()
|
l = m.PrevKv.Size()
|
||||||
n += 1 + l + sovKv(uint64(l))
|
n += 1 + l + sovKv(uint64(l))
|
||||||
}
|
}
|
||||||
|
if m.XXX_unrecognized != nil {
|
||||||
|
n += len(m.XXX_unrecognized)
|
||||||
|
}
|
||||||
return n
|
return n
|
||||||
}
|
}
|
||||||
|
|
||||||
func sovKv(x uint64) (n int) {
|
func sovKv(x uint64) (n int) {
|
||||||
for {
|
return (math_bits.Len64(x|1) + 6) / 7
|
||||||
n++
|
|
||||||
x >>= 7
|
|
||||||
if x == 0 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return n
|
|
||||||
}
|
}
|
||||||
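The regenerated sovKv replaces the shift-and-count loop with a closed-form size based on the bit length. A minimal sketch comparing the two (standalone helpers, not part of the generated file):

```go
package main

import (
	"fmt"
	"math/bits"
)

// sizeLoop mirrors the old sovKv: count 7-bit groups by repeated shifting.
func sizeLoop(x uint64) (n int) {
	for {
		n++
		x >>= 7
		if x == 0 {
			break
		}
	}
	return n
}

// sizeBits mirrors the new sovKv: x|1 forces at least one significant bit,
// so zero still costs one byte; every further 7 bits adds another byte.
func sizeBits(x uint64) int { return (bits.Len64(x|1) + 6) / 7 }

func main() {
	for _, x := range []uint64{0, 1, 127, 128, 16383, 16384, 1<<56 - 1, 1 << 63} {
		fmt.Printf("x=%-20d loop=%d bits=%d\n", x, sizeLoop(x), sizeBits(x))
	}
}
```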
func sozKv(x uint64) (n int) {
|
func sozKv(x uint64) (n int) {
|
||||||
return sovKv(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
return sovKv(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
||||||
@@ -279,7 +390,7 @@ func (m *KeyValue) Unmarshal(dAtA []byte) error {
|
|||||||
}
|
}
|
||||||
b := dAtA[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
wire |= (uint64(b) & 0x7F) << shift
|
wire |= uint64(b&0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
@@ -307,7 +418,7 @@ func (m *KeyValue) Unmarshal(dAtA []byte) error {
|
|||||||
}
|
}
|
||||||
b := dAtA[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
byteLen |= (int(b) & 0x7F) << shift
|
byteLen |= int(b&0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
@@ -316,6 +427,9 @@ func (m *KeyValue) Unmarshal(dAtA []byte) error {
|
|||||||
return ErrInvalidLengthKv
|
return ErrInvalidLengthKv
|
||||||
}
|
}
|
||||||
postIndex := iNdEx + byteLen
|
postIndex := iNdEx + byteLen
|
||||||
|
if postIndex < 0 {
|
||||||
|
return ErrInvalidLengthKv
|
||||||
|
}
|
||||||
if postIndex > l {
|
if postIndex > l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
@@ -338,7 +452,7 @@ func (m *KeyValue) Unmarshal(dAtA []byte) error {
|
|||||||
}
|
}
|
||||||
b := dAtA[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
m.CreateRevision |= (int64(b) & 0x7F) << shift
|
m.CreateRevision |= int64(b&0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
@@ -357,7 +471,7 @@ func (m *KeyValue) Unmarshal(dAtA []byte) error {
|
|||||||
}
|
}
|
||||||
b := dAtA[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
m.ModRevision |= (int64(b) & 0x7F) << shift
|
m.ModRevision |= int64(b&0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
@@ -376,7 +490,7 @@ func (m *KeyValue) Unmarshal(dAtA []byte) error {
|
|||||||
}
|
}
|
||||||
b := dAtA[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
m.Version |= (int64(b) & 0x7F) << shift
|
m.Version |= int64(b&0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
@@ -395,7 +509,7 @@ func (m *KeyValue) Unmarshal(dAtA []byte) error {
|
|||||||
}
|
}
|
||||||
b := dAtA[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
byteLen |= (int(b) & 0x7F) << shift
|
byteLen |= int(b&0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
@@ -404,6 +518,9 @@ func (m *KeyValue) Unmarshal(dAtA []byte) error {
|
|||||||
return ErrInvalidLengthKv
|
return ErrInvalidLengthKv
|
||||||
}
|
}
|
||||||
postIndex := iNdEx + byteLen
|
postIndex := iNdEx + byteLen
|
||||||
|
if postIndex < 0 {
|
||||||
|
return ErrInvalidLengthKv
|
||||||
|
}
|
||||||
if postIndex > l {
|
if postIndex > l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
@@ -426,7 +543,7 @@ func (m *KeyValue) Unmarshal(dAtA []byte) error {
|
|||||||
}
|
}
|
||||||
b := dAtA[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
m.Lease |= (int64(b) & 0x7F) << shift
|
m.Lease |= int64(b&0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
@@ -440,9 +557,13 @@ func (m *KeyValue) Unmarshal(dAtA []byte) error {
|
|||||||
if skippy < 0 {
|
if skippy < 0 {
|
||||||
return ErrInvalidLengthKv
|
return ErrInvalidLengthKv
|
||||||
}
|
}
|
||||||
|
if (iNdEx + skippy) < 0 {
|
||||||
|
return ErrInvalidLengthKv
|
||||||
|
}
|
||||||
if (iNdEx + skippy) > l {
|
if (iNdEx + skippy) > l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
|
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
|
||||||
iNdEx += skippy
|
iNdEx += skippy
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -467,7 +588,7 @@ func (m *Event) Unmarshal(dAtA []byte) error {
|
|||||||
}
|
}
|
||||||
b := dAtA[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
wire |= (uint64(b) & 0x7F) << shift
|
wire |= uint64(b&0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
@@ -495,7 +616,7 @@ func (m *Event) Unmarshal(dAtA []byte) error {
|
|||||||
}
|
}
|
||||||
b := dAtA[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
m.Type |= (Event_EventType(b) & 0x7F) << shift
|
m.Type |= Event_EventType(b&0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
@@ -514,7 +635,7 @@ func (m *Event) Unmarshal(dAtA []byte) error {
|
|||||||
}
|
}
|
||||||
b := dAtA[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
msglen |= (int(b) & 0x7F) << shift
|
msglen |= int(b&0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
@@ -523,6 +644,9 @@ func (m *Event) Unmarshal(dAtA []byte) error {
|
|||||||
return ErrInvalidLengthKv
|
return ErrInvalidLengthKv
|
||||||
}
|
}
|
||||||
postIndex := iNdEx + msglen
|
postIndex := iNdEx + msglen
|
||||||
|
if postIndex < 0 {
|
||||||
|
return ErrInvalidLengthKv
|
||||||
|
}
|
||||||
if postIndex > l {
|
if postIndex > l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
@@ -547,7 +671,7 @@ func (m *Event) Unmarshal(dAtA []byte) error {
|
|||||||
}
|
}
|
||||||
b := dAtA[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
msglen |= (int(b) & 0x7F) << shift
|
msglen |= int(b&0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
@@ -556,6 +680,9 @@ func (m *Event) Unmarshal(dAtA []byte) error {
|
|||||||
return ErrInvalidLengthKv
|
return ErrInvalidLengthKv
|
||||||
}
|
}
|
||||||
postIndex := iNdEx + msglen
|
postIndex := iNdEx + msglen
|
||||||
|
if postIndex < 0 {
|
||||||
|
return ErrInvalidLengthKv
|
||||||
|
}
|
||||||
if postIndex > l {
|
if postIndex > l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
@@ -575,9 +702,13 @@ func (m *Event) Unmarshal(dAtA []byte) error {
|
|||||||
if skippy < 0 {
|
if skippy < 0 {
|
||||||
return ErrInvalidLengthKv
|
return ErrInvalidLengthKv
|
||||||
}
|
}
|
||||||
|
if (iNdEx + skippy) < 0 {
|
||||||
|
return ErrInvalidLengthKv
|
||||||
|
}
|
||||||
if (iNdEx + skippy) > l {
|
if (iNdEx + skippy) > l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
|
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
|
||||||
iNdEx += skippy
|
iNdEx += skippy
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -641,10 +772,13 @@ func skipKv(dAtA []byte) (n int, err error) {
|
|||||||
break
|
break
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
iNdEx += length
|
|
||||||
if length < 0 {
|
if length < 0 {
|
||||||
return 0, ErrInvalidLengthKv
|
return 0, ErrInvalidLengthKv
|
||||||
}
|
}
|
||||||
|
iNdEx += length
|
||||||
|
if iNdEx < 0 {
|
||||||
|
return 0, ErrInvalidLengthKv
|
||||||
|
}
|
||||||
return iNdEx, nil
|
return iNdEx, nil
|
||||||
case 3:
|
case 3:
|
||||||
for {
|
for {
|
||||||
@@ -673,6 +807,9 @@ func skipKv(dAtA []byte) (n int, err error) {
|
|||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
iNdEx = start + next
|
iNdEx = start + next
|
||||||
|
if iNdEx < 0 {
|
||||||
|
return 0, ErrInvalidLengthKv
|
||||||
|
}
|
||||||
}
|
}
|
||||||
return iNdEx, nil
|
return iNdEx, nil
|
||||||
case 4:
|
case 4:
|
||||||
@@ -691,28 +828,3 @@ var (
|
|||||||
ErrInvalidLengthKv = fmt.Errorf("proto: negative length found during unmarshaling")
|
ErrInvalidLengthKv = fmt.Errorf("proto: negative length found during unmarshaling")
|
||||||
ErrIntOverflowKv = fmt.Errorf("proto: integer overflow")
|
ErrIntOverflowKv = fmt.Errorf("proto: integer overflow")
|
||||||
)
|
)
|
||||||
|
|
||||||
func init() { proto.RegisterFile("kv.proto", fileDescriptorKv) }
|
|
||||||
|
|
||||||
var fileDescriptorKv = []byte{
|
|
||||||
// 303 bytes of a gzipped FileDescriptorProto
|
|
||||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0x41, 0x4e, 0xc2, 0x40,
|
|
||||||
0x14, 0x86, 0x3b, 0x14, 0x0a, 0x3e, 0x08, 0x36, 0x13, 0x12, 0x27, 0x2e, 0x26, 0x95, 0x8d, 0x18,
|
|
||||||
0x13, 0x4c, 0xf0, 0x06, 0xc6, 0xae, 0x70, 0x61, 0x1a, 0x74, 0x4b, 0x4a, 0x79, 0x21, 0xa4, 0x94,
|
|
||||||
0x69, 0x4a, 0x9d, 0xa4, 0x37, 0x71, 0xef, 0xde, 0x73, 0xb0, 0xe4, 0x08, 0x52, 0x2f, 0x62, 0xfa,
|
|
||||||
0xc6, 0xe2, 0xc6, 0xcd, 0xe4, 0xfd, 0xff, 0xff, 0x65, 0xe6, 0x7f, 0x03, 0x9d, 0x58, 0x8f, 0xd3,
|
|
||||||
0x4c, 0xe5, 0x8a, 0x3b, 0x89, 0x8e, 0xa2, 0x74, 0x71, 0x39, 0x58, 0xa9, 0x95, 0x22, 0xeb, 0xae,
|
|
||||||
0x9a, 0x4c, 0x3a, 0xfc, 0x64, 0xd0, 0x99, 0x62, 0xf1, 0x1a, 0x6e, 0xde, 0x90, 0xbb, 0x60, 0xc7,
|
|
||||||
0x58, 0x08, 0xe6, 0xb1, 0x51, 0x2f, 0xa8, 0x46, 0x7e, 0x0d, 0xe7, 0x51, 0x86, 0x61, 0x8e, 0xf3,
|
|
||||||
0x0c, 0xf5, 0x7a, 0xb7, 0x56, 0x5b, 0xd1, 0xf0, 0xd8, 0xc8, 0x0e, 0xfa, 0xc6, 0x0e, 0x7e, 0x5d,
|
|
||||||
0x7e, 0x05, 0xbd, 0x44, 0x2d, 0xff, 0x28, 0x9b, 0xa8, 0x6e, 0xa2, 0x96, 0x27, 0x44, 0x40, 0x5b,
|
|
||||||
0x63, 0x46, 0x69, 0x93, 0xd2, 0x5a, 0xf2, 0x01, 0xb4, 0x74, 0x55, 0x40, 0xb4, 0xe8, 0x65, 0x23,
|
|
||||||
0x2a, 0x77, 0x83, 0xe1, 0x0e, 0x85, 0x43, 0xb4, 0x11, 0xc3, 0x0f, 0x06, 0x2d, 0x5f, 0xe3, 0x36,
|
|
||||||
0xe7, 0xb7, 0xd0, 0xcc, 0x8b, 0x14, 0xa9, 0x6e, 0x7f, 0x72, 0x31, 0x36, 0x7b, 0x8e, 0x29, 0x34,
|
|
||||||
0xe7, 0xac, 0x48, 0x31, 0x20, 0x88, 0x7b, 0xd0, 0x88, 0x35, 0x75, 0xef, 0x4e, 0xdc, 0x1a, 0xad,
|
|
||||||
0x17, 0x0f, 0x1a, 0xb1, 0xe6, 0x37, 0xd0, 0x4e, 0x33, 0xd4, 0xf3, 0x58, 0x53, 0xf9, 0xff, 0x30,
|
|
||||||
0xa7, 0x02, 0xa6, 0x7a, 0xe8, 0xc1, 0xd9, 0xe9, 0x7e, 0xde, 0x06, 0xfb, 0xf9, 0x65, 0xe6, 0x5a,
|
|
||||||
0x1c, 0xc0, 0x79, 0xf4, 0x9f, 0xfc, 0x99, 0xef, 0xb2, 0x07, 0xb1, 0x3f, 0x4a, 0xeb, 0x70, 0x94,
|
|
||||||
0xd6, 0xbe, 0x94, 0xec, 0x50, 0x4a, 0xf6, 0x55, 0x4a, 0xf6, 0xfe, 0x2d, 0xad, 0x85, 0x43, 0xff,
|
|
||||||
0x7e, 0xff, 0x13, 0x00, 0x00, 0xff, 0xff, 0xb5, 0x45, 0x92, 0x5d, 0xa1, 0x01, 0x00, 0x00,
|
|
||||||
}
|
|
||||||
|
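The overall shape of the regenerated marshalers is worth noting: instead of appending fields front-to-back with MarshalTo, the code now sizes the message once and fills the buffer back-to-front with MarshalToSizedBuffer, writing each field's payload before its tag byte. A rough standalone sketch of that pattern for a hypothetical two-field message (miniKV is illustrative, not the generated KeyValue):

```go
package main

import (
	"fmt"
	"math/bits"
)

type miniKV struct {
	Key   []byte // field 1, wire type 2 (length-delimited), tag byte 0x0a
	Lease int64  // field 6, wire type 0 (varint), tag byte 0x30
}

func sizeVarint(x uint64) int { return (bits.Len64(x|1) + 6) / 7 }

// putVarint mirrors the new encodeVarintKv: reserve the exact size below
// offset, fill that slot front-to-back, and return the new write offset.
func putVarint(dst []byte, offset int, v uint64) int {
	offset -= sizeVarint(v)
	base := offset
	for v >= 1<<7 {
		dst[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	dst[offset] = uint8(v)
	return base
}

func (m *miniKV) size() int {
	n := 0
	if l := len(m.Key); l > 0 {
		n += 1 + l + sizeVarint(uint64(l))
	}
	if m.Lease != 0 {
		n += 1 + sizeVarint(uint64(m.Lease))
	}
	return n
}

// marshal fills dAtA from the end toward the front, highest field number
// first, so length-delimited fields can be framed without a second pass.
func (m *miniKV) marshal() []byte {
	dAtA := make([]byte, m.size())
	i := len(dAtA)
	if m.Lease != 0 {
		i = putVarint(dAtA, i, uint64(m.Lease))
		i--
		dAtA[i] = 0x30
	}
	if len(m.Key) > 0 {
		i -= len(m.Key)
		copy(dAtA[i:], m.Key)
		i = putVarint(dAtA, i, uint64(len(m.Key)))
		i--
		dAtA[i] = 0xa
	}
	return dAtA[i:]
}

func main() {
	kv := &miniKV{Key: []byte("foo"), Lease: 300}
	fmt.Printf("% x\n", kv.marshal()) // 0a 03 66 6f 6f 30 ac 02
}
```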
|||||||
1
vendor/github.com/coreos/etcd/mvcc/watcher_group.go
generated
vendored
1
vendor/github.com/coreos/etcd/mvcc/watcher_group.go
generated
vendored
@@ -156,6 +156,7 @@ type watcherGroup struct {
|
|||||||
func newWatcherGroup() watcherGroup {
|
func newWatcherGroup() watcherGroup {
|
||||||
return watcherGroup{
|
return watcherGroup{
|
||||||
keyWatchers: make(watcherSetByKey),
|
keyWatchers: make(watcherSetByKey),
|
||||||
|
ranges: adt.NewIntervalTree(),
|
||||||
watchers: make(watcherSet),
|
watchers: make(watcherSet),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
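Since adt.IntervalTree is now an interface rather than a struct (see the interval_tree.go changes below), the zero value of the ranges field is no longer usable, hence the explicit adt.NewIntervalTree() call added above. A simplified sketch of that shape, with stand-in types rather than the real mvcc/adt code:

```go
package main

import "fmt"

// Stand-ins: before the change Tree was a struct whose zero value worked;
// now it is an interface that must be constructed explicitly.
type Tree interface{ Len() int }

type tree struct{ count int }

func (t *tree) Len() int { return t.count }

func NewTree() Tree { return &tree{} }

type group struct {
	ranges Tree
}

func main() {
	// g := group{}              // g.ranges would be nil; g.ranges.Len() would panic.
	g := group{ranges: NewTree()} // mirrors newWatcherGroup: ranges: adt.NewIntervalTree()
	fmt.Println(g.ranges.Len())
}
```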
|||||||
48
vendor/github.com/coreos/etcd/pkg/adt/README.md
generated
vendored
Normal file
48
vendor/github.com/coreos/etcd/pkg/adt/README.md
generated
vendored
Normal file
@@ -0,0 +1,48 @@
|
|||||||
|
|
||||||
|
## Red-Black Tree
|
||||||
|
|
||||||
|
*"Introduction to Algorithms" (Cormen et al, 3rd ed.), Chapter 13*
|
||||||
|
|
||||||
|
1. Every node is either red or black.
|
||||||
|
2. The root is black.
|
||||||
|
3. Every leaf (NIL) is black.
|
||||||
|
4. If a node is red, then both its children are black.
|
||||||
|
5. For each node, all simple paths from the node to descendant leaves contain the
|
||||||
|
same number of black nodes.
|
||||||
|
|
||||||
|
For example,
|
||||||
|
|
||||||
|
```go
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"go.etcd.io/etcd/pkg/adt"
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
ivt := adt.NewIntervalTree()
|
||||||
|
ivt.Insert(NewInt64Interval(510, 511), 0)
|
||||||
|
ivt.Insert(NewInt64Interval(82, 83), 0)
|
||||||
|
ivt.Insert(NewInt64Interval(830, 831), 0)
|
||||||
|
...
|
||||||
|
```
|
||||||
|
|
||||||
|
After inserting the values `510`, `82`, `830`, `11`, `383`, `647`, `899`, `261`, `410`, `514`, `815`, `888`, `972`, `238`, `292`, `953`.
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
Deleting the node `514` should not trigger any rebalancing:
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
Deleting the node `11` triggers multiple rotates for rebalancing:
|
||||||
|
|
||||||
|

|
||||||
|

|
||||||
|

|
||||||
|

|
||||||
|

|
||||||
|

|
||||||
|

|
||||||
|
|
||||||
|
Try yourself at https://www.cs.usfca.edu/~galles/visualization/RedBlack.html.
|
||||||
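The README snippet above is abbreviated: it omits the package clause and leaves the constructors unqualified. A compilable version of the same exercise is sketched below; the import path is an assumption (this vendored copy lives under github.com/coreos/etcd/pkg/adt, while upstream uses go.etcd.io/etcd/pkg/adt).

```go
package main

import (
	"fmt"

	"github.com/coreos/etcd/pkg/adt" // assumed import path for this vendored copy
)

func main() {
	ivt := adt.NewIntervalTree()

	// Insert the same [begin, begin+1) int64 intervals the README uses.
	for _, begin := range []int64{510, 82, 830, 11, 383, 647, 899, 261, 410, 514, 815, 888, 972, 238, 292, 953} {
		ivt.Insert(adt.NewInt64Interval(begin, begin+1), 0)
	}
	fmt.Println("len:", ivt.Len(), "height:", ivt.Height(), "max height:", ivt.MaxHeight())

	// Stab returns every stored interval overlapping the query interval.
	for _, iv := range ivt.Stab(adt.NewInt64Interval(500, 520)) {
		fmt.Printf("overlaps [500,520): [%v,%v)\n", iv.Ivl.Begin, iv.Ivl.End)
	}

	// Delete reports whether a node with exactly this interval was removed,
	// mirroring the "deleting 514" step illustrated above.
	fmt.Println("deleted 514:", ivt.Delete(adt.NewInt64Interval(514, 515)))
}
```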
546
vendor/github.com/coreos/etcd/pkg/adt/interval_tree.go
generated
vendored
546
vendor/github.com/coreos/etcd/pkg/adt/interval_tree.go
generated
vendored
@@ -16,7 +16,9 @@ package adt
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
|
"fmt"
|
||||||
"math"
|
"math"
|
||||||
|
"strings"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Comparable is an interface for trichotomic comparisons.
|
// Comparable is an interface for trichotomic comparisons.
|
||||||
@@ -35,6 +37,17 @@ const (
|
|||||||
red
|
red
|
||||||
)
|
)
|
||||||
|
|
||||||
|
func (c rbcolor) String() string {
|
||||||
|
switch c {
|
||||||
|
case black:
|
||||||
|
return "black"
|
||||||
|
case red:
|
||||||
|
return "black"
|
||||||
|
default:
|
||||||
|
panic(fmt.Errorf("unknown color %d", c))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// Interval implements a Comparable interval [begin, end)
|
// Interval implements a Comparable interval [begin, end)
|
||||||
// TODO: support different sorts of intervals: (a,b), [a,b], (a, b]
|
// TODO: support different sorts of intervals: (a,b), [a,b], (a, b]
|
||||||
type Interval struct {
|
type Interval struct {
|
||||||
@@ -74,39 +87,39 @@ type intervalNode struct {
|
|||||||
c rbcolor
|
c rbcolor
|
||||||
}
|
}
|
||||||
|
|
||||||
func (x *intervalNode) color() rbcolor {
|
func (x *intervalNode) color(sentinel *intervalNode) rbcolor {
|
||||||
if x == nil {
|
if x == sentinel {
|
||||||
return black
|
return black
|
||||||
}
|
}
|
||||||
return x.c
|
return x.c
|
||||||
}
|
}
|
||||||
|
|
||||||
func (n *intervalNode) height() int {
|
func (x *intervalNode) height(sentinel *intervalNode) int {
|
||||||
if n == nil {
|
if x == sentinel {
|
||||||
return 0
|
return 0
|
||||||
}
|
}
|
||||||
ld := n.left.height()
|
ld := x.left.height(sentinel)
|
||||||
rd := n.right.height()
|
rd := x.right.height(sentinel)
|
||||||
if ld < rd {
|
if ld < rd {
|
||||||
return rd + 1
|
return rd + 1
|
||||||
}
|
}
|
||||||
return ld + 1
|
return ld + 1
|
||||||
}
|
}
|
||||||
|
|
||||||
func (x *intervalNode) min() *intervalNode {
|
func (x *intervalNode) min(sentinel *intervalNode) *intervalNode {
|
||||||
for x.left != nil {
|
for x.left != sentinel {
|
||||||
x = x.left
|
x = x.left
|
||||||
}
|
}
|
||||||
return x
|
return x
|
||||||
}
|
}
|
||||||
|
|
||||||
// successor is the next in-order node in the tree
|
// successor is the next in-order node in the tree
|
||||||
func (x *intervalNode) successor() *intervalNode {
|
func (x *intervalNode) successor(sentinel *intervalNode) *intervalNode {
|
||||||
if x.right != nil {
|
if x.right != sentinel {
|
||||||
return x.right.min()
|
return x.right.min(sentinel)
|
||||||
}
|
}
|
||||||
y := x.parent
|
y := x.parent
|
||||||
for y != nil && x == y.right {
|
for y != sentinel && x == y.right {
|
||||||
x = y
|
x = y
|
||||||
y = y.parent
|
y = y.parent
|
||||||
}
|
}
|
||||||
@@ -114,14 +127,14 @@ func (x *intervalNode) successor() *intervalNode {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// updateMax updates the maximum values for a node and its ancestors
|
// updateMax updates the maximum values for a node and its ancestors
|
||||||
func (x *intervalNode) updateMax() {
|
func (x *intervalNode) updateMax(sentinel *intervalNode) {
|
||||||
for x != nil {
|
for x != sentinel {
|
||||||
oldmax := x.max
|
oldmax := x.max
|
||||||
max := x.iv.Ivl.End
|
max := x.iv.Ivl.End
|
||||||
if x.left != nil && x.left.max.Compare(max) > 0 {
|
if x.left != sentinel && x.left.max.Compare(max) > 0 {
|
||||||
max = x.left.max
|
max = x.left.max
|
||||||
}
|
}
|
||||||
if x.right != nil && x.right.max.Compare(max) > 0 {
|
if x.right != sentinel && x.right.max.Compare(max) > 0 {
|
||||||
max = x.right.max
|
max = x.right.max
|
||||||
}
|
}
|
||||||
if oldmax.Compare(max) == 0 {
|
if oldmax.Compare(max) == 0 {
|
||||||
@@ -135,66 +148,151 @@ func (x *intervalNode) updateMax() {
|
|||||||
type nodeVisitor func(n *intervalNode) bool
|
type nodeVisitor func(n *intervalNode) bool
|
||||||
|
|
||||||
// visit will call a node visitor on each node that overlaps the given interval
|
// visit will call a node visitor on each node that overlaps the given interval
|
||||||
func (x *intervalNode) visit(iv *Interval, nv nodeVisitor) bool {
|
func (x *intervalNode) visit(iv *Interval, sentinel *intervalNode, nv nodeVisitor) bool {
|
||||||
if x == nil {
|
if x == sentinel {
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
v := iv.Compare(&x.iv.Ivl)
|
v := iv.Compare(&x.iv.Ivl)
|
||||||
switch {
|
switch {
|
||||||
case v < 0:
|
case v < 0:
|
||||||
if !x.left.visit(iv, nv) {
|
if !x.left.visit(iv, sentinel, nv) {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
case v > 0:
|
case v > 0:
|
||||||
maxiv := Interval{x.iv.Ivl.Begin, x.max}
|
maxiv := Interval{x.iv.Ivl.Begin, x.max}
|
||||||
if maxiv.Compare(iv) == 0 {
|
if maxiv.Compare(iv) == 0 {
|
||||||
if !x.left.visit(iv, nv) || !x.right.visit(iv, nv) {
|
if !x.left.visit(iv, sentinel, nv) || !x.right.visit(iv, sentinel, nv) {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
default:
|
default:
|
||||||
if !x.left.visit(iv, nv) || !nv(x) || !x.right.visit(iv, nv) {
|
if !x.left.visit(iv, sentinel, nv) || !nv(x) || !x.right.visit(iv, sentinel, nv) {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// IntervalValue represents a range tree node that contains a range and a value.
|
||||||
type IntervalValue struct {
|
type IntervalValue struct {
|
||||||
Ivl Interval
|
Ivl Interval
|
||||||
Val interface{}
|
Val interface{}
|
||||||
}
|
}
|
||||||
|
|
||||||
// IntervalTree represents a (mostly) textbook implementation of the
|
// IntervalTree represents a (mostly) textbook implementation of the
|
||||||
// "Introduction to Algorithms" (Cormen et al, 2nd ed.) chapter 13 red-black tree
|
// "Introduction to Algorithms" (Cormen et al, 3rd ed.) chapter 13 red-black tree
|
||||||
// and chapter 14.3 interval tree with search supporting "stabbing queries".
|
// and chapter 14.3 interval tree with search supporting "stabbing queries".
|
||||||
type IntervalTree struct {
|
type IntervalTree interface {
|
||||||
|
// Insert adds a node with the given interval into the tree.
|
||||||
|
Insert(ivl Interval, val interface{})
|
||||||
|
// Delete removes the node with the given interval from the tree, returning
|
||||||
|
// true if a node is in fact removed.
|
||||||
|
Delete(ivl Interval) bool
|
||||||
|
// Len gives the number of elements in the tree.
|
||||||
|
Len() int
|
||||||
|
// Height is the number of levels in the tree; one node has height 1.
|
||||||
|
Height() int
|
||||||
|
// MaxHeight is the expected maximum tree height given the number of nodes.
|
||||||
|
MaxHeight() int
|
||||||
|
// Visit calls a visitor function on every tree node intersecting the given interval.
|
||||||
|
// It will visit each interval [x, y) in ascending order sorted on x.
|
||||||
|
Visit(ivl Interval, ivv IntervalVisitor)
|
||||||
|
// Find gets the IntervalValue for the node matching the given interval
|
||||||
|
Find(ivl Interval) *IntervalValue
|
||||||
|
// Intersects returns true if there is some tree node intersecting the given interval.
|
||||||
|
Intersects(iv Interval) bool
|
||||||
|
// Contains returns true if the interval tree's keys cover the entire given interval.
|
||||||
|
Contains(ivl Interval) bool
|
||||||
|
// Stab returns a slice with all elements in the tree intersecting the interval.
|
||||||
|
Stab(iv Interval) []*IntervalValue
|
||||||
|
// Union merges a given interval tree into the receiver.
|
||||||
|
Union(inIvt IntervalTree, ivl Interval)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewIntervalTree returns a new interval tree.
|
||||||
|
func NewIntervalTree() IntervalTree {
|
||||||
|
sentinel := &intervalNode{
|
||||||
|
iv: IntervalValue{},
|
||||||
|
max: nil,
|
||||||
|
left: nil,
|
||||||
|
right: nil,
|
||||||
|
parent: nil,
|
||||||
|
c: black,
|
||||||
|
}
|
||||||
|
return &intervalTree{
|
||||||
|
root: sentinel,
|
||||||
|
count: 0,
|
||||||
|
sentinel: sentinel,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type intervalTree struct {
|
||||||
root *intervalNode
|
root *intervalNode
|
||||||
count int
|
count int
|
||||||
|
|
||||||
|
// red-black NIL node
|
||||||
|
// use 'sentinel' as a dummy object to simplify boundary conditions
|
||||||
|
// use the sentinel to treat a nil child of a node x as an ordinary node whose parent is x
|
||||||
|
// use one shared sentinel to represent all nil leaves and the root's parent
|
||||||
|
sentinel *intervalNode
|
||||||
}
|
}
|
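The comments above describe the CLRS sentinel ("T.nil") technique: one shared black node stands in for every nil leaf and for the root's parent, so fixup and rotation code can read a child's or parent's color without nil checks. A minimal standalone sketch of the idea on a bare binary search tree, with hypothetical names unrelated to the adt package:

```go
package main

import "fmt"

type color int

const (
	black color = iota
	red
)

type node struct {
	key                 int
	c                   color
	left, right, parent *node
}

type tree struct {
	root     *node
	sentinel *node // shared stand-in for every nil leaf and the root's parent
}

func newTree() *tree {
	s := &node{c: black} // every leaf (NIL) is black, per property 3 above
	s.left, s.right, s.parent = s, s, s
	return &tree{root: s, sentinel: s}
}

// insert is a plain BST insert; it only illustrates sentinel bookkeeping,
// not red-black rebalancing.
func (t *tree) insert(key int) {
	z := &node{key: key, c: red, left: t.sentinel, right: t.sentinel, parent: t.sentinel}
	y, x := t.sentinel, t.root
	for x != t.sentinel {
		y = x
		if key < x.key {
			x = x.left
		} else {
			x = x.right
		}
	}
	z.parent = y
	switch {
	case y == t.sentinel:
		t.root = z
	case key < y.key:
		y.left = z
	default:
		y.right = z
	}
}

func main() {
	t := newTree()
	for _, k := range []int{5, 2, 8} {
		t.insert(k)
	}
	// Because leaves are the sentinel rather than nil, color reads never need
	// a nil guard: an "empty" child is simply a black node.
	fmt.Println("left child of 8 is black:", t.root.right.left.c == black)
	fmt.Println("root parent is the sentinel:", t.root.parent == t.sentinel)
}
```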
||||||
|
|
||||||
|
// TODO: make this consistent with textbook implementation
|
||||||
|
//
|
||||||
|
// "Introduction to Algorithms" (Cormen et al, 3rd ed.), chapter 13.4, p324
|
||||||
|
//
|
||||||
|
// 0. RB-DELETE(T, z)
|
||||||
|
// 1.
|
||||||
|
// 2. y = z
|
||||||
|
// 3. y-original-color = y.color
|
||||||
|
// 4.
|
||||||
|
// 5. if z.left == T.nil
|
||||||
|
// 6. x = z.right
|
||||||
|
// 7. RB-TRANSPLANT(T, z, z.right)
|
||||||
|
// 8. else if z.right == T.nil
|
||||||
|
// 9. x = z.left
|
||||||
|
// 10. RB-TRANSPLANT(T, z, z.left)
|
||||||
|
// 11. else
|
||||||
|
// 12. y = TREE-MINIMUM(z.right)
|
||||||
|
// 13. y-original-color = y.color
|
||||||
|
// 14. x = y.right
|
||||||
|
// 15. if y.p == z
|
||||||
|
// 16. x.p = y
|
||||||
|
// 17. else
|
||||||
|
// 18. RB-TRANSPLANT(T, y, y.right)
|
||||||
|
// 19. y.right = z.right
|
||||||
|
// 20. y.right.p = y
|
||||||
|
// 21. RB-TRANSPLANT(T, z, y)
|
||||||
|
// 22. y.left = z.left
|
||||||
|
// 23. y.left.p = y
|
||||||
|
// 24. y.color = z.color
|
||||||
|
// 25.
|
||||||
|
// 26. if y-original-color == BLACK
|
||||||
|
// 27. RB-DELETE-FIXUP(T, x)
|
||||||
|
|
||||||
// Delete removes the node with the given interval from the tree, returning
|
// Delete removes the node with the given interval from the tree, returning
|
||||||
// true if a node is in fact removed.
|
// true if a node is in fact removed.
|
||||||
func (ivt *IntervalTree) Delete(ivl Interval) bool {
|
func (ivt *intervalTree) Delete(ivl Interval) bool {
|
||||||
z := ivt.find(ivl)
|
z := ivt.find(ivl)
|
||||||
if z == nil {
|
if z == ivt.sentinel {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
y := z
|
y := z
|
||||||
if z.left != nil && z.right != nil {
|
if z.left != ivt.sentinel && z.right != ivt.sentinel {
|
||||||
y = z.successor()
|
y = z.successor(ivt.sentinel)
|
||||||
}
|
}
|
||||||
|
|
||||||
x := y.left
|
x := ivt.sentinel
|
||||||
if x == nil {
|
if y.left != ivt.sentinel {
|
||||||
|
x = y.left
|
||||||
|
} else if y.right != ivt.sentinel {
|
||||||
x = y.right
|
x = y.right
|
||||||
}
|
}
|
||||||
if x != nil {
|
|
||||||
x.parent = y.parent
|
|
||||||
}
|
|
||||||
|
|
||||||
if y.parent == nil {
|
x.parent = y.parent
|
||||||
|
|
||||||
|
if y.parent == ivt.sentinel {
|
||||||
ivt.root = x
|
ivt.root = x
|
||||||
} else {
|
} else {
|
||||||
if y == y.parent.left {
|
if y == y.parent.left {
|
||||||
@@ -202,14 +300,14 @@ func (ivt *IntervalTree) Delete(ivl Interval) bool {
|
|||||||
} else {
|
} else {
|
||||||
y.parent.right = x
|
y.parent.right = x
|
||||||
}
|
}
|
||||||
y.parent.updateMax()
|
y.parent.updateMax(ivt.sentinel)
|
||||||
}
|
}
|
||||||
if y != z {
|
if y != z {
|
||||||
z.iv = y.iv
|
z.iv = y.iv
|
||||||
z.updateMax()
|
z.updateMax(ivt.sentinel)
|
||||||
}
|
}
|
||||||
|
|
||||||
if y.color() == black && x != nil {
|
if y.color(ivt.sentinel) == black {
|
||||||
ivt.deleteFixup(x)
|
ivt.deleteFixup(x)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -217,11 +315,55 @@ func (ivt *IntervalTree) Delete(ivl Interval) bool {
|
|||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
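A short usage sketch exercising the rewritten Delete (import path assumed, as in the README sketch above); the printed Height/MaxHeight pair is informational, MaxHeight being the package's expected upper bound for the node count:

```go
package main

import (
	"fmt"

	"github.com/coreos/etcd/pkg/adt" // assumed import path for this vendored copy
)

func main() {
	ivt := adt.NewIntervalTree()
	for i := int64(0); i < 1000; i++ {
		ivt.Insert(adt.NewInt64Interval(i, i+1), nil)
	}

	// Delete reports whether a node with exactly the given interval was removed.
	for i := int64(0); i < 1000; i += 2 {
		if !ivt.Delete(adt.NewInt64Interval(i, i+1)) {
			panic("interval unexpectedly missing")
		}
	}
	fmt.Println("remaining:", ivt.Len())
	fmt.Println("absent interval deleted:", ivt.Delete(adt.NewInt64Interval(5000, 5001))) // false
	fmt.Println("height:", ivt.Height(), "expected max:", ivt.MaxHeight())
}
```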
||||||
func (ivt *IntervalTree) deleteFixup(x *intervalNode) {
|
// "Introduction to Algorithms" (Cormen et al, 3rd ed.), chapter 13.4, p326
|
||||||
for x != ivt.root && x.color() == black && x.parent != nil {
|
//
|
||||||
if x == x.parent.left {
|
// 0. RB-DELETE-FIXUP(T, z)
|
||||||
|
// 1.
|
||||||
|
// 2. while x ≠ T.root and x.color == BLACK
|
||||||
|
// 3. if x == x.p.left
|
||||||
|
// 4. w = x.p.right
|
||||||
|
// 5. if w.color == RED
|
||||||
|
// 6. w.color = BLACK
|
||||||
|
// 7. x.p.color = RED
|
||||||
|
// 8. LEFT-ROTATE(T, x, p)
|
||||||
|
// 9. if w.left.color == BLACK and w.right.color == BLACK
|
||||||
|
// 10. w.color = RED
|
||||||
|
// 11. x = x.p
|
||||||
|
// 12. else if w.right.color == BLACK
|
||||||
|
// 13. w.left.color = BLACK
|
||||||
|
// 14. w.color = RED
|
||||||
|
// 15. RIGHT-ROTATE(T, w)
|
||||||
|
// 16. w = w.p.right
|
||||||
|
// 17. w.color = x.p.color
|
||||||
|
// 18. x.p.color = BLACK
|
||||||
|
// 19. LEFT-ROTATE(T, w.p)
|
||||||
|
// 20. x = T.root
|
||||||
|
// 21. else
|
||||||
|
// 22. w = x.p.left
|
||||||
|
// 23. if w.color == RED
|
||||||
|
// 24. w.color = BLACK
|
||||||
|
// 25. x.p.color = RED
|
||||||
|
// 26. RIGHT-ROTATE(T, x, p)
|
||||||
|
// 27. if w.right.color == BLACK and w.left.color == BLACK
|
||||||
|
// 28. w.color = RED
|
||||||
|
// 29. x = x.p
|
||||||
|
// 30. else if w.left.color == BLACK
|
||||||
|
// 31. w.right.color = BLACK
|
||||||
|
// 32. w.color = RED
|
||||||
|
// 33. LEFT-ROTATE(T, w)
|
||||||
|
// 34. w = w.p.left
|
||||||
|
// 35. w.color = x.p.color
|
||||||
|
// 36. x.p.color = BLACK
|
||||||
|
// 37. RIGHT-ROTATE(T, w.p)
|
||||||
|
// 38. x = T.root
|
||||||
|
// 39.
|
||||||
|
// 40. x.color = BLACK
|
||||||
|
//
|
||||||
|
func (ivt *intervalTree) deleteFixup(x *intervalNode) {
|
||||||
|
for x != ivt.root && x.color(ivt.sentinel) == black {
|
||||||
|
if x == x.parent.left { // line 3-20
|
||||||
w := x.parent.right
|
w := x.parent.right
|
||||||
if w.color() == red {
|
if w.color(ivt.sentinel) == red {
|
||||||
w.c = black
|
w.c = black
|
||||||
x.parent.c = red
|
x.parent.c = red
|
||||||
ivt.rotateLeft(x.parent)
|
ivt.rotateLeft(x.parent)
|
||||||
@@ -230,26 +372,26 @@ func (ivt *IntervalTree) deleteFixup(x *intervalNode) {
|
|||||||
if w == nil {
|
if w == nil {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
if w.left.color() == black && w.right.color() == black {
|
if w.left.color(ivt.sentinel) == black && w.right.color(ivt.sentinel) == black {
|
||||||
w.c = red
|
w.c = red
|
||||||
x = x.parent
|
x = x.parent
|
||||||
} else {
|
} else {
|
||||||
if w.right.color() == black {
|
if w.right.color(ivt.sentinel) == black {
|
||||||
w.left.c = black
|
w.left.c = black
|
||||||
w.c = red
|
w.c = red
|
||||||
ivt.rotateRight(w)
|
ivt.rotateRight(w)
|
||||||
w = x.parent.right
|
w = x.parent.right
|
||||||
}
|
}
|
||||||
w.c = x.parent.color()
|
w.c = x.parent.color(ivt.sentinel)
|
||||||
x.parent.c = black
|
x.parent.c = black
|
||||||
w.right.c = black
|
w.right.c = black
|
||||||
ivt.rotateLeft(x.parent)
|
ivt.rotateLeft(x.parent)
|
||||||
x = ivt.root
|
x = ivt.root
|
||||||
}
|
}
|
||||||
} else {
|
} else { // line 22-38
|
||||||
// same as above but with left and right exchanged
|
// same as above but with left and right exchanged
|
||||||
w := x.parent.left
|
w := x.parent.left
|
||||||
if w.color() == red {
|
if w.color(ivt.sentinel) == red {
|
||||||
w.c = black
|
w.c = black
|
||||||
x.parent.c = red
|
x.parent.c = red
|
||||||
ivt.rotateRight(x.parent)
|
ivt.rotateRight(x.parent)
|
||||||
@@ -258,17 +400,17 @@ func (ivt *IntervalTree) deleteFixup(x *intervalNode) {
|
|||||||
if w == nil {
|
if w == nil {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
if w.left.color() == black && w.right.color() == black {
|
if w.left.color(ivt.sentinel) == black && w.right.color(ivt.sentinel) == black {
|
||||||
w.c = red
|
w.c = red
|
||||||
x = x.parent
|
x = x.parent
|
||||||
} else {
|
} else {
|
||||||
if w.left.color() == black {
|
if w.left.color(ivt.sentinel) == black {
|
||||||
w.right.c = black
|
w.right.c = black
|
||||||
w.c = red
|
w.c = red
|
||||||
ivt.rotateLeft(w)
|
ivt.rotateLeft(w)
|
||||||
w = x.parent.left
|
w = x.parent.left
|
||||||
}
|
}
|
||||||
w.c = x.parent.color()
|
w.c = x.parent.color(ivt.sentinel)
|
||||||
x.parent.c = black
|
x.parent.c = black
|
||||||
w.left.c = black
|
w.left.c = black
|
||||||
ivt.rotateRight(x.parent)
|
ivt.rotateRight(x.parent)
|
||||||
@@ -276,17 +418,60 @@ func (ivt *IntervalTree) deleteFixup(x *intervalNode) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if x != nil {
|
if x != nil {
|
||||||
x.c = black
|
x.c = black
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (ivt *intervalTree) createIntervalNode(ivl Interval, val interface{}) *intervalNode {
|
||||||
|
return &intervalNode{
|
||||||
|
iv: IntervalValue{ivl, val},
|
||||||
|
max: ivl.End,
|
||||||
|
c: red,
|
||||||
|
left: ivt.sentinel,
|
||||||
|
right: ivt.sentinel,
|
||||||
|
parent: ivt.sentinel,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO: make this consistent with textbook implementation
|
||||||
|
//
|
||||||
|
// "Introduction to Algorithms" (Cormen et al, 3rd ed.), chapter 13.3, p315
|
||||||
|
//
|
||||||
|
// 0. RB-INSERT(T, z)
|
||||||
|
// 1.
|
||||||
|
// 2. y = T.nil
|
||||||
|
// 3. x = T.root
|
||||||
|
// 4.
|
||||||
|
// 5. while x ≠ T.nil
|
||||||
|
// 6. y = x
|
||||||
|
// 7. if z.key < x.key
|
||||||
|
// 8. x = x.left
|
||||||
|
// 9. else
|
||||||
|
// 10. x = x.right
|
||||||
|
// 11.
|
||||||
|
// 12. z.p = y
|
||||||
|
// 13.
|
||||||
|
// 14. if y == T.nil
|
||||||
|
// 15. T.root = z
|
||||||
|
// 16. else if z.key < y.key
|
||||||
|
// 17. y.left = z
|
||||||
|
// 18. else
|
||||||
|
// 19. y.right = z
|
||||||
|
// 20.
|
||||||
|
// 21. z.left = T.nil
|
||||||
|
// 22. z.right = T.nil
|
||||||
|
// 23. z.color = RED
|
||||||
|
// 24.
|
||||||
|
// 25. RB-INSERT-FIXUP(T, z)
|
||||||
|
|
||||||
// Insert adds a node with the given interval into the tree.
|
// Insert adds a node with the given interval into the tree.
|
||||||
func (ivt *IntervalTree) Insert(ivl Interval, val interface{}) {
|
func (ivt *intervalTree) Insert(ivl Interval, val interface{}) {
|
||||||
var y *intervalNode
|
y := ivt.sentinel
|
||||||
z := &intervalNode{iv: IntervalValue{ivl, val}, max: ivl.End, c: red}
|
z := ivt.createIntervalNode(ivl, val)
|
||||||
x := ivt.root
|
x := ivt.root
|
||||||
for x != nil {
|
for x != ivt.sentinel {
|
||||||
y = x
|
y = x
|
||||||
if z.iv.Ivl.Begin.Compare(x.iv.Ivl.Begin) < 0 {
|
if z.iv.Ivl.Begin.Compare(x.iv.Ivl.Begin) < 0 {
|
||||||
x = x.left
|
x = x.left
|
||||||
@@ -296,7 +481,7 @@ func (ivt *IntervalTree) Insert(ivl Interval, val interface{}) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
z.parent = y
|
z.parent = y
|
||||||
if y == nil {
|
if y == ivt.sentinel {
|
||||||
ivt.root = z
|
ivt.root = z
|
||||||
} else {
|
} else {
|
||||||
if z.iv.Ivl.Begin.Compare(y.iv.Ivl.Begin) < 0 {
|
if z.iv.Ivl.Begin.Compare(y.iv.Ivl.Begin) < 0 {
|
||||||
@@ -304,18 +489,54 @@ func (ivt *IntervalTree) Insert(ivl Interval, val interface{}) {
|
|||||||
} else {
|
} else {
|
||||||
y.right = z
|
y.right = z
|
||||||
}
|
}
|
||||||
y.updateMax()
|
y.updateMax(ivt.sentinel)
|
||||||
}
|
}
|
||||||
z.c = red
|
z.c = red
|
||||||
|
|
||||||
ivt.insertFixup(z)
|
ivt.insertFixup(z)
|
||||||
ivt.count++
|
ivt.count++
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ivt *IntervalTree) insertFixup(z *intervalNode) {
|
// "Introduction to Algorithms" (Cormen et al, 3rd ed.), chapter 13.3, p316
|
||||||
for z.parent != nil && z.parent.parent != nil && z.parent.color() == red {
|
//
|
||||||
if z.parent == z.parent.parent.left {
|
// 0. RB-INSERT-FIXUP(T, z)
|
||||||
|
// 1.
|
||||||
|
// 2. while z.p.color == RED
|
||||||
|
// 3. if z.p == z.p.p.left
|
||||||
|
// 4. y = z.p.p.right
|
||||||
|
// 5. if y.color == RED
|
||||||
|
// 6. z.p.color = BLACK
|
||||||
|
// 7. y.color = BLACK
|
||||||
|
// 8. z.p.p.color = RED
|
||||||
|
// 9. z = z.p.p
|
||||||
|
// 10. else if z == z.p.right
|
||||||
|
// 11. z = z.p
|
||||||
|
// 12. LEFT-ROTATE(T, z)
|
||||||
|
// 13. z.p.color = BLACK
|
||||||
|
// 14. z.p.p.color = RED
|
||||||
|
// 15. RIGHT-ROTATE(T, z.p.p)
|
||||||
|
// 16. else
|
||||||
|
// 17. y = z.p.p.left
|
||||||
|
// 18. if y.color == RED
|
||||||
|
// 19. z.p.color = BLACK
|
||||||
|
// 20. y.color = BLACK
|
||||||
|
// 21. z.p.p.color = RED
|
||||||
|
// 22. z = z.p.p
|
||||||
|
// 23. else if z == z.p.right
|
||||||
|
// 24. z = z.p
|
||||||
|
// 25. RIGHT-ROTATE(T, z)
|
||||||
|
// 26. z.p.color = BLACK
|
||||||
|
// 27. z.p.p.color = RED
|
||||||
|
// 28. LEFT-ROTATE(T, z.p.p)
|
||||||
|
// 29.
|
||||||
|
// 30. T.root.color = BLACK
|
||||||
|
//
|
||||||
|
func (ivt *intervalTree) insertFixup(z *intervalNode) {
|
||||||
|
for z.parent.color(ivt.sentinel) == red {
|
||||||
|
if z.parent == z.parent.parent.left { // line 3-15
|
||||||
|
|
||||||
y := z.parent.parent.right
|
y := z.parent.parent.right
|
||||||
if y.color() == red {
|
if y.color(ivt.sentinel) == red {
|
||||||
y.c = black
|
y.c = black
|
||||||
z.parent.c = black
|
z.parent.c = black
|
||||||
z.parent.parent.c = red
|
z.parent.parent.c = red
|
||||||
@@ -329,10 +550,10 @@ func (ivt *IntervalTree) insertFixup(z *intervalNode) {
|
|||||||
z.parent.parent.c = red
|
z.parent.parent.c = red
|
||||||
ivt.rotateRight(z.parent.parent)
|
ivt.rotateRight(z.parent.parent)
|
||||||
}
|
}
|
||||||
} else {
|
} else { // line 16-28
|
||||||
// same as then with left/right exchanged
|
// same as then with left/right exchanged
|
||||||
y := z.parent.parent.left
|
y := z.parent.parent.left
|
||||||
if y.color() == red {
|
if y.color(ivt.sentinel) == red {
|
||||||
y.c = black
|
y.c = black
|
||||||
z.parent.c = black
|
z.parent.c = black
|
||||||
z.parent.parent.c = red
|
z.parent.parent.c = red
|
||||||
@@ -348,42 +569,109 @@ func (ivt *IntervalTree) insertFixup(z *intervalNode) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// line 30
|
||||||
ivt.root.c = black
|
ivt.root.c = black
|
||||||
}
|
}
|
||||||
|
|
||||||
// rotateLeft moves x so it is left of its right child
|
// rotateLeft moves x so it is left of its right child
|
||||||
func (ivt *IntervalTree) rotateLeft(x *intervalNode) {
|
//
|
||||||
y := x.right
|
// "Introduction to Algorithms" (Cormen et al, 3rd ed.), chapter 13.2, p313
|
||||||
x.right = y.left
|
//
|
||||||
if y.left != nil {
|
// 0. LEFT-ROTATE(T, x)
|
||||||
y.left.parent = x
|
// 1.
|
||||||
}
|
// 2. y = x.right
|
||||||
x.updateMax()
|
// 3. x.right = y.left
|
||||||
ivt.replaceParent(x, y)
|
// 4.
|
||||||
y.left = x
|
// 5. if y.left ≠ T.nil
|
||||||
y.updateMax()
|
// 6. y.left.p = x
|
||||||
}
|
// 7.
|
||||||
|
// 8. y.p = x.p
|
||||||
// rotateLeft moves x so it is right of its left child
|
// 9.
|
||||||
func (ivt *IntervalTree) rotateRight(x *intervalNode) {
|
// 10. if x.p == T.nil
|
||||||
if x == nil {
|
// 11. T.root = y
|
||||||
|
// 12. else if x == x.p.left
|
||||||
|
// 13. x.p.left = y
|
||||||
|
// 14. else
|
||||||
|
// 15. x.p.right = y
|
||||||
|
// 16.
|
||||||
|
// 17. y.left = x
|
||||||
|
// 18. x.p = y
|
||||||
|
//
|
||||||
|
func (ivt *intervalTree) rotateLeft(x *intervalNode) {
|
||||||
|
// rotateLeft x must have right child
|
||||||
|
if x.right == ivt.sentinel {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// line 2-3
|
||||||
|
y := x.right
|
||||||
|
x.right = y.left
|
||||||
|
|
||||||
|
// line 5-6
|
||||||
|
if y.left != ivt.sentinel {
|
||||||
|
y.left.parent = x
|
||||||
|
}
|
||||||
|
x.updateMax(ivt.sentinel)
|
||||||
|
|
||||||
|
// line 10-15, 18
|
||||||
|
ivt.replaceParent(x, y)
|
||||||
|
|
||||||
|
// line 17
|
||||||
|
y.left = x
|
||||||
|
y.updateMax(ivt.sentinel)
|
||||||
|
}
|
||||||
|
|
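Both rotations only re-link pointers along one path and then refresh the cached max values. For reference, a tiny standalone LEFT-ROTATE on a plain BST with parent pointers (hypothetical types; nil plays the role of T.nil and there is no max bookkeeping):

```go
package main

import "fmt"

type node struct {
	key                 int
	left, right, parent *node
}

// leftRotate lifts x.right into x's place and makes x its left child,
// following lines 2-18 of the LEFT-ROTATE pseudocode above.
func leftRotate(root **node, x *node) {
	y := x.right // line 2
	if y == nil {
		return // nothing to rotate around
	}
	x.right = y.left // line 3
	if y.left != nil {
		y.left.parent = x // lines 5-6
	}
	y.parent = x.parent // line 8
	switch {
	case x.parent == nil:
		*root = y // lines 10-11
	case x == x.parent.left:
		x.parent.left = y // lines 12-13
	default:
		x.parent.right = y // lines 14-15
	}
	y.left = x   // line 17
	x.parent = y // line 18
}

func main() {
	// x(10) with right child y(20), which has left child b(15).
	x := &node{key: 10}
	y := &node{key: 20, parent: x}
	b := &node{key: 15, parent: y}
	x.right, y.left = y, b
	root := x

	leftRotate(&root, x)
	fmt.Println("new root:", root.key)                    // 20
	fmt.Println("root.left:", root.left.key)              // 10
	fmt.Println("root.left.right:", root.left.right.key)  // 15
}
```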
||||||
|
// rotateRight moves x so it is right of its left child
|
||||||
|
//
|
||||||
|
// 0. RIGHT-ROTATE(T, x)
|
||||||
|
// 1.
|
||||||
|
// 2. y = x.left
|
||||||
|
// 3. x.left = y.right
|
||||||
|
// 4.
|
||||||
|
// 5. if y.right ≠ T.nil
|
||||||
|
// 6. y.right.p = x
|
||||||
|
// 7.
|
||||||
|
// 8. y.p = x.p
|
||||||
|
// 9.
|
||||||
|
// 10. if x.p == T.nil
|
||||||
|
// 11. T.root = y
|
||||||
|
// 12. else if x == x.p.right
|
||||||
|
// 13. x.p.right = y
|
||||||
|
// 14. else
|
||||||
|
// 15. x.p.left = y
|
||||||
|
// 16.
|
||||||
|
// 17. y.right = x
|
||||||
|
// 18. x.p = y
|
||||||
|
//
|
||||||
|
func (ivt *intervalTree) rotateRight(x *intervalNode) {
|
||||||
|
// rotateRight x must have left child
|
||||||
|
if x.left == ivt.sentinel {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// line 2-3
|
||||||
y := x.left
|
y := x.left
|
||||||
x.left = y.right
|
x.left = y.right
|
||||||
if y.right != nil {
|
|
||||||
|
// line 5-6
|
||||||
|
if y.right != ivt.sentinel {
|
||||||
y.right.parent = x
|
y.right.parent = x
|
||||||
}
|
}
|
||||||
x.updateMax()
|
x.updateMax(ivt.sentinel)
|
||||||
|
|
||||||
|
// line 10-15, 18
|
||||||
ivt.replaceParent(x, y)
|
ivt.replaceParent(x, y)
|
||||||
|
|
||||||
|
// line 17
|
||||||
y.right = x
|
y.right = x
|
||||||
y.updateMax()
|
y.updateMax(ivt.sentinel)
|
||||||
}
|
}
|
||||||
|
|
||||||
// replaceParent replaces x's parent with y
|
// replaceParent replaces x's parent with y
|
||||||
func (ivt *IntervalTree) replaceParent(x *intervalNode, y *intervalNode) {
|
func (ivt *intervalTree) replaceParent(x *intervalNode, y *intervalNode) {
|
||||||
y.parent = x.parent
|
y.parent = x.parent
|
||||||
if x.parent == nil {
|
if x.parent == ivt.sentinel {
|
||||||
ivt.root = y
|
ivt.root = y
|
||||||
} else {
|
} else {
|
||||||
if x == x.parent.left {
|
if x == x.parent.left {
|
||||||
@@ -391,19 +679,19 @@ func (ivt *IntervalTree) replaceParent(x *intervalNode, y *intervalNode) {
|
|||||||
} else {
|
} else {
|
||||||
x.parent.right = y
|
x.parent.right = y
|
||||||
}
|
}
|
||||||
x.parent.updateMax()
|
x.parent.updateMax(ivt.sentinel)
|
||||||
}
|
}
|
||||||
x.parent = y
|
x.parent = y
|
||||||
}
|
}
|
||||||
|
|
||||||
// Len gives the number of elements in the tree
|
// Len gives the number of elements in the tree
|
||||||
func (ivt *IntervalTree) Len() int { return ivt.count }
|
func (ivt *intervalTree) Len() int { return ivt.count }
|
||||||
|
|
||||||
// Height is the number of levels in the tree; one node has height 1.
|
// Height is the number of levels in the tree; one node has height 1.
|
||||||
func (ivt *IntervalTree) Height() int { return ivt.root.height() }
|
func (ivt *intervalTree) Height() int { return ivt.root.height(ivt.sentinel) }
|
||||||
|
|
||||||
// MaxHeight is the expected maximum tree height given the number of nodes
|
// MaxHeight is the expected maximum tree height given the number of nodes
|
||||||
func (ivt *IntervalTree) MaxHeight() int {
|
func (ivt *intervalTree) MaxHeight() int {
|
||||||
return int((2 * math.Log2(float64(ivt.Len()+1))) + 0.5)
|
return int((2 * math.Log2(float64(ivt.Len()+1))) + 0.5)
|
||||||
}
|
}
|
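MaxHeight encodes the CLRS bound that a red-black tree with n internal nodes has height at most 2·log2(n+1); the +0.5 merely rounds to the nearest integer. A quick worked check of the formula alone:

```go
package main

import (
	"fmt"
	"math"
)

// maxHeight reproduces the formula used by MaxHeight for a tree of n nodes.
func maxHeight(n int) int {
	return int((2 * math.Log2(float64(n+1))) + 0.5)
}

func main() {
	// e.g. 1 node -> 2, 7 nodes -> 6, 1023 nodes -> 20.
	for _, n := range []int{1, 7, 15, 255, 1023} {
		fmt.Printf("n=%4d  expected max height=%d\n", n, maxHeight(n))
	}
}
```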
||||||
|
|
||||||
@@ -412,12 +700,13 @@ type IntervalVisitor func(n *IntervalValue) bool
|
|||||||
|
|
||||||
// Visit calls a visitor function on every tree node intersecting the given interval.
|
// Visit calls a visitor function on every tree node intersecting the given interval.
|
||||||
// It will visit each interval [x, y) in ascending order sorted on x.
|
// It will visit each interval [x, y) in ascending order sorted on x.
|
||||||
func (ivt *IntervalTree) Visit(ivl Interval, ivv IntervalVisitor) {
|
func (ivt *intervalTree) Visit(ivl Interval, ivv IntervalVisitor) {
|
||||||
ivt.root.visit(&ivl, func(n *intervalNode) bool { return ivv(&n.iv) })
|
ivt.root.visit(&ivl, ivt.sentinel, func(n *intervalNode) bool { return ivv(&n.iv) })
|
||||||
}
|
}
|
||||||
|
|
||||||
// find the exact node for a given interval
|
// find the exact node for a given interval
|
||||||
func (ivt *IntervalTree) find(ivl Interval) (ret *intervalNode) {
|
func (ivt *intervalTree) find(ivl Interval) *intervalNode {
|
||||||
|
ret := ivt.sentinel
|
||||||
f := func(n *intervalNode) bool {
|
f := func(n *intervalNode) bool {
|
||||||
if n.iv.Ivl != ivl {
|
if n.iv.Ivl != ivl {
|
||||||
return true
|
return true
|
||||||
@@ -425,34 +714,34 @@ func (ivt *IntervalTree) find(ivl Interval) (ret *intervalNode) {
|
|||||||
ret = n
|
ret = n
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
ivt.root.visit(&ivl, f)
|
ivt.root.visit(&ivl, ivt.sentinel, f)
|
||||||
return ret
|
return ret
|
||||||
}
|
}
|
||||||
|
|
||||||
// Find gets the IntervalValue for the node matching the given interval
|
// Find gets the IntervalValue for the node matching the given interval
|
||||||
func (ivt *IntervalTree) Find(ivl Interval) (ret *IntervalValue) {
|
func (ivt *intervalTree) Find(ivl Interval) (ret *IntervalValue) {
|
||||||
n := ivt.find(ivl)
|
n := ivt.find(ivl)
|
||||||
if n == nil {
|
if n == ivt.sentinel {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
return &n.iv
|
return &n.iv
|
||||||
}
|
}
|
||||||
|
|
||||||
// Intersects returns true if there is some tree node intersecting the given interval.
|
// Intersects returns true if there is some tree node intersecting the given interval.
|
||||||
func (ivt *IntervalTree) Intersects(iv Interval) bool {
|
func (ivt *intervalTree) Intersects(iv Interval) bool {
|
||||||
x := ivt.root
|
x := ivt.root
|
||||||
for x != nil && iv.Compare(&x.iv.Ivl) != 0 {
|
for x != ivt.sentinel && iv.Compare(&x.iv.Ivl) != 0 {
|
||||||
if x.left != nil && x.left.max.Compare(iv.Begin) > 0 {
|
if x.left != ivt.sentinel && x.left.max.Compare(iv.Begin) > 0 {
|
||||||
x = x.left
|
x = x.left
|
||||||
} else {
|
} else {
|
||||||
x = x.right
|
x = x.right
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return x != nil
|
return x != ivt.sentinel
|
||||||
}
|
}
|
||||||
|
|
||||||
// Contains returns true if the interval tree's keys cover the entire given interval.
|
// Contains returns true if the interval tree's keys cover the entire given interval.
|
||||||
func (ivt *IntervalTree) Contains(ivl Interval) bool {
|
func (ivt *intervalTree) Contains(ivl Interval) bool {
|
||||||
var maxEnd, minBegin Comparable
|
var maxEnd, minBegin Comparable
|
||||||
|
|
||||||
isContiguous := true
|
isContiguous := true
|
||||||
@@ -476,7 +765,7 @@ func (ivt *IntervalTree) Contains(ivl Interval) bool {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Stab returns a slice with all elements in the tree intersecting the interval.
|
// Stab returns a slice with all elements in the tree intersecting the interval.
|
||||||
func (ivt *IntervalTree) Stab(iv Interval) (ivs []*IntervalValue) {
|
func (ivt *intervalTree) Stab(iv Interval) (ivs []*IntervalValue) {
|
||||||
if ivt.count == 0 {
|
if ivt.count == 0 {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@@ -486,7 +775,7 @@ func (ivt *IntervalTree) Stab(iv Interval) (ivs []*IntervalValue) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Union merges a given interval tree into the receiver.
|
// Union merges a given interval tree into the receiver.
|
||||||
func (ivt *IntervalTree) Union(inIvt IntervalTree, ivl Interval) {
|
func (ivt *intervalTree) Union(inIvt IntervalTree, ivl Interval) {
|
||||||
f := func(n *IntervalValue) bool {
|
f := func(n *IntervalValue) bool {
|
||||||
ivt.Insert(n.Ivl, n.Val)
|
ivt.Insert(n.Ivl, n.Val)
|
||||||
return true
|
return true
|
||||||
@@ -494,6 +783,63 @@ func (ivt *IntervalTree) Union(inIvt IntervalTree, ivl Interval) {
|
|||||||
inIvt.Visit(ivl, f)
|
inIvt.Visit(ivl, f)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type visitedInterval struct {
|
||||||
|
root Interval
|
||||||
|
left Interval
|
||||||
|
right Interval
|
||||||
|
color rbcolor
|
||||||
|
depth int
|
||||||
|
}
|
||||||
|
|
||||||
|
func (vi visitedInterval) String() string {
|
||||||
|
bd := new(strings.Builder)
|
||||||
|
bd.WriteString(fmt.Sprintf("root [%v,%v,%v], left [%v,%v], right [%v,%v], depth %d",
|
||||||
|
vi.root.Begin, vi.root.End, vi.color,
|
||||||
|
vi.left.Begin, vi.left.End,
|
||||||
|
vi.right.Begin, vi.right.End,
|
||||||
|
vi.depth,
|
||||||
|
))
|
||||||
|
return bd.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
// visitLevel traverses tree in level order.
|
||||||
|
// used for testing
|
||||||
|
func (ivt *intervalTree) visitLevel() []visitedInterval {
|
||||||
|
if ivt.root == ivt.sentinel {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
rs := make([]visitedInterval, 0, ivt.Len())
|
||||||
|
|
||||||
|
type pair struct {
|
||||||
|
node *intervalNode
|
||||||
|
depth int
|
||||||
|
}
|
||||||
|
queue := []pair{{ivt.root, 0}}
|
||||||
|
for len(queue) > 0 {
|
||||||
|
f := queue[0]
|
||||||
|
queue = queue[1:]
|
||||||
|
|
||||||
|
vi := visitedInterval{
|
||||||
|
root: f.node.iv.Ivl,
|
||||||
|
color: f.node.color(ivt.sentinel),
|
||||||
|
depth: f.depth,
|
||||||
|
}
|
||||||
|
if f.node.left != ivt.sentinel {
|
||||||
|
vi.left = f.node.left.iv.Ivl
|
||||||
|
queue = append(queue, pair{f.node.left, f.depth + 1})
|
||||||
|
}
|
||||||
|
if f.node.right != ivt.sentinel {
|
||||||
|
vi.right = f.node.right.iv.Ivl
|
||||||
|
queue = append(queue, pair{f.node.right, f.depth + 1})
|
||||||
|
}
|
||||||
|
|
||||||
|
rs = append(rs, vi)
|
||||||
|
}
|
||||||
|
|
||||||
|
return rs
|
||||||
|
}
|
||||||
|
|
||||||
type StringComparable string
|
type StringComparable string
|
||||||
|
|
||||||
func (s StringComparable) Compare(c Comparable) int {
|
func (s StringComparable) Compare(c Comparable) int {
|
||||||
@@ -543,6 +889,7 @@ func (s StringAffineComparable) Compare(c Comparable) int {
|
|||||||
func NewStringAffineInterval(begin, end string) Interval {
|
func NewStringAffineInterval(begin, end string) Interval {
|
||||||
return Interval{StringAffineComparable(begin), StringAffineComparable(end)}
|
return Interval{StringAffineComparable(begin), StringAffineComparable(end)}
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewStringAffinePoint(s string) Interval {
|
func NewStringAffinePoint(s string) Interval {
|
||||||
return NewStringAffineInterval(s, s+"\x00")
|
return NewStringAffineInterval(s, s+"\x00")
|
||||||
}
|
}
|
||||||
@@ -551,6 +898,10 @@ func NewInt64Interval(a int64, b int64) Interval {
|
|||||||
return Interval{Int64Comparable(a), Int64Comparable(b)}
|
return Interval{Int64Comparable(a), Int64Comparable(b)}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func newInt64EmptyInterval() Interval {
|
||||||
|
return Interval{Begin: nil, End: nil}
|
||||||
|
}
|
||||||
|
|
||||||
func NewInt64Point(a int64) Interval {
|
func NewInt64Point(a int64) Interval {
|
||||||
return Interval{Int64Comparable(a), Int64Comparable(a + 1)}
|
return Interval{Int64Comparable(a), Int64Comparable(a + 1)}
|
||||||
}
|
}
|
||||||
@@ -591,6 +942,7 @@ func (b BytesAffineComparable) Compare(c Comparable) int {
|
|||||||
func NewBytesAffineInterval(begin, end []byte) Interval {
|
func NewBytesAffineInterval(begin, end []byte) Interval {
|
||||||
return Interval{BytesAffineComparable(begin), BytesAffineComparable(end)}
|
return Interval{BytesAffineComparable(begin), BytesAffineComparable(end)}
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewBytesAffinePoint(b []byte) Interval {
|
func NewBytesAffinePoint(b []byte) Interval {
|
||||||
be := make([]byte, len(b)+1)
|
be := make([]byte, len(b)+1)
|
||||||
copy(be, b)
|
copy(be, b)
|
||||||
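The hunks above move the exported interval-tree methods onto the unexported intervalTree type and route traversal through an explicit sentinel node. A minimal usage sketch of the tree's public surface follows; it assumes the vendored adt package exposes a NewIntervalTree constructor (as the grpcproxy cache change later in this diff suggests) and that Insert, Stab, Visit, Intersects and Contains keep the signatures shown above.

package main

import (
	"fmt"

	"github.com/coreos/etcd/pkg/adt"
)

func main() {
	ivt := adt.NewIntervalTree()

	// Half-open int64 intervals [begin, end) with arbitrary payloads.
	ivt.Insert(adt.NewInt64Interval(1, 5), "a")
	ivt.Insert(adt.NewInt64Interval(4, 10), "b")
	ivt.Insert(adt.NewInt64Point(12), "c") // [12, 13)

	// Stab returns every stored interval intersecting the query.
	for _, iv := range ivt.Stab(adt.NewInt64Interval(4, 6)) {
		fmt.Printf("stab hit %v -> %v\n", iv.Ivl, iv.Val)
	}

	// Visit walks intersecting intervals in ascending order of begin key;
	// returning false from the visitor stops the traversal early.
	ivt.Visit(adt.NewInt64Interval(0, 20), func(n *adt.IntervalValue) bool {
		fmt.Println("visit", n.Ivl.Begin, n.Ivl.End)
		return true
	})

	fmt.Println("intersects [11,12):", ivt.Intersects(adt.NewInt64Interval(11, 12)))
	fmt.Println("contains   [1,10):", ivt.Contains(adt.NewInt64Interval(1, 10)))
}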
|
|||||||
2
vendor/github.com/coreos/etcd/pkg/cpuutil/endian.go
generated
vendored
2
vendor/github.com/coreos/etcd/pkg/cpuutil/endian.go
generated
vendored
@@ -27,7 +27,7 @@ var byteOrder binary.ByteOrder
|
|||||||
func ByteOrder() binary.ByteOrder { return byteOrder }
|
func ByteOrder() binary.ByteOrder { return byteOrder }
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
var i int = 0x1
|
i := int(0x1)
|
||||||
if v := (*[intWidth]byte)(unsafe.Pointer(&i)); v[0] == 0 {
|
if v := (*[intWidth]byte)(unsafe.Pointer(&i)); v[0] == 0 {
|
||||||
byteOrder = binary.BigEndian
|
byteOrder = binary.BigEndian
|
||||||
} else {
|
} else {
|
||||||
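The init change above is a style cleanup (i := int(0x1)), but the surrounding probe is the interesting part: it stores 1 in a native integer and inspects the lowest-addressed byte through unsafe.Pointer; a zero first byte means the host is big-endian. A standalone sketch of the same idea, not tied to the cpuutil package:

package main

import (
	"encoding/binary"
	"fmt"
	"unsafe"
)

// hostByteOrder writes 1 into a uint16 and looks at its first byte in memory.
// On a little-endian host the low-order byte comes first, so it is 1;
// on a big-endian host it is 0.
func hostByteOrder() binary.ByteOrder {
	x := uint16(1)
	if (*[2]byte)(unsafe.Pointer(&x))[0] == 0 {
		return binary.BigEndian
	}
	return binary.LittleEndian
}

func main() {
	fmt.Println("host byte order:", hostByteOrder())
}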
|
|||||||
63
vendor/github.com/coreos/etcd/pkg/logutil/BUILD
generated
vendored
63
vendor/github.com/coreos/etcd/pkg/logutil/BUILD
generated
vendored
@@ -2,11 +2,70 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library")
|
|||||||
|
|
||||||
go_library(
|
go_library(
|
||||||
name = "go_default_library",
|
name = "go_default_library",
|
||||||
srcs = ["merge_logger.go"],
|
srcs = [
|
||||||
|
"discard_logger.go",
|
||||||
|
"doc.go",
|
||||||
|
"log_level.go",
|
||||||
|
"logger.go",
|
||||||
|
"merge_logger.go",
|
||||||
|
"package_logger.go",
|
||||||
|
"zap.go",
|
||||||
|
"zap_grpc.go",
|
||||||
|
"zap_journal.go",
|
||||||
|
"zap_raft.go",
|
||||||
|
],
|
||||||
importmap = "k8s.io/kubernetes/vendor/github.com/coreos/etcd/pkg/logutil",
|
importmap = "k8s.io/kubernetes/vendor/github.com/coreos/etcd/pkg/logutil",
|
||||||
importpath = "github.com/coreos/etcd/pkg/logutil",
|
importpath = "github.com/coreos/etcd/pkg/logutil",
|
||||||
visibility = ["//visibility:public"],
|
visibility = ["//visibility:public"],
|
||||||
deps = ["//vendor/github.com/coreos/pkg/capnslog:go_default_library"],
|
deps = [
|
||||||
|
"//vendor/github.com/coreos/etcd/raft:go_default_library",
|
||||||
|
"//vendor/github.com/coreos/pkg/capnslog:go_default_library",
|
||||||
|
"//vendor/go.uber.org/zap:go_default_library",
|
||||||
|
"//vendor/go.uber.org/zap/zapcore:go_default_library",
|
||||||
|
"//vendor/google.golang.org/grpc/grpclog:go_default_library",
|
||||||
|
] + select({
|
||||||
|
"@io_bazel_rules_go//go/platform:android": [
|
||||||
|
"//vendor/github.com/coreos/etcd/pkg/systemd:go_default_library",
|
||||||
|
"//vendor/github.com/coreos/go-systemd/journal:go_default_library",
|
||||||
|
],
|
||||||
|
"@io_bazel_rules_go//go/platform:darwin": [
|
||||||
|
"//vendor/github.com/coreos/etcd/pkg/systemd:go_default_library",
|
||||||
|
"//vendor/github.com/coreos/go-systemd/journal:go_default_library",
|
||||||
|
],
|
||||||
|
"@io_bazel_rules_go//go/platform:dragonfly": [
|
||||||
|
"//vendor/github.com/coreos/etcd/pkg/systemd:go_default_library",
|
||||||
|
"//vendor/github.com/coreos/go-systemd/journal:go_default_library",
|
||||||
|
],
|
||||||
|
"@io_bazel_rules_go//go/platform:freebsd": [
|
||||||
|
"//vendor/github.com/coreos/etcd/pkg/systemd:go_default_library",
|
||||||
|
"//vendor/github.com/coreos/go-systemd/journal:go_default_library",
|
||||||
|
],
|
||||||
|
"@io_bazel_rules_go//go/platform:linux": [
|
||||||
|
"//vendor/github.com/coreos/etcd/pkg/systemd:go_default_library",
|
||||||
|
"//vendor/github.com/coreos/go-systemd/journal:go_default_library",
|
||||||
|
],
|
||||||
|
"@io_bazel_rules_go//go/platform:nacl": [
|
||||||
|
"//vendor/github.com/coreos/etcd/pkg/systemd:go_default_library",
|
||||||
|
"//vendor/github.com/coreos/go-systemd/journal:go_default_library",
|
||||||
|
],
|
||||||
|
"@io_bazel_rules_go//go/platform:netbsd": [
|
||||||
|
"//vendor/github.com/coreos/etcd/pkg/systemd:go_default_library",
|
||||||
|
"//vendor/github.com/coreos/go-systemd/journal:go_default_library",
|
||||||
|
],
|
||||||
|
"@io_bazel_rules_go//go/platform:openbsd": [
|
||||||
|
"//vendor/github.com/coreos/etcd/pkg/systemd:go_default_library",
|
||||||
|
"//vendor/github.com/coreos/go-systemd/journal:go_default_library",
|
||||||
|
],
|
||||||
|
"@io_bazel_rules_go//go/platform:plan9": [
|
||||||
|
"//vendor/github.com/coreos/etcd/pkg/systemd:go_default_library",
|
||||||
|
"//vendor/github.com/coreos/go-systemd/journal:go_default_library",
|
||||||
|
],
|
||||||
|
"@io_bazel_rules_go//go/platform:solaris": [
|
||||||
|
"//vendor/github.com/coreos/etcd/pkg/systemd:go_default_library",
|
||||||
|
"//vendor/github.com/coreos/go-systemd/journal:go_default_library",
|
||||||
|
],
|
||||||
|
"//conditions:default": [],
|
||||||
|
}),
|
||||||
)
|
)
|
||||||
|
|
||||||
filegroup(
|
filegroup(
|
||||||
|
|||||||
46
vendor/github.com/coreos/etcd/pkg/logutil/discard_logger.go
generated
vendored
Normal file
46
vendor/github.com/coreos/etcd/pkg/logutil/discard_logger.go
generated
vendored
Normal file
@@ -0,0 +1,46 @@
|
|||||||
|
// Copyright 2018 The etcd Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package logutil
|
||||||
|
|
||||||
|
import (
|
||||||
|
"log"
|
||||||
|
|
||||||
|
"google.golang.org/grpc/grpclog"
|
||||||
|
)
|
||||||
|
|
||||||
|
// assert that "discardLogger" satisfy "Logger" interface
|
||||||
|
var _ Logger = &discardLogger{}
|
||||||
|
|
||||||
|
// NewDiscardLogger returns a new Logger that discards everything except "fatal".
|
||||||
|
func NewDiscardLogger() Logger { return &discardLogger{} }
|
||||||
|
|
||||||
|
type discardLogger struct{}
|
||||||
|
|
||||||
|
func (l *discardLogger) Info(args ...interface{}) {}
|
||||||
|
func (l *discardLogger) Infoln(args ...interface{}) {}
|
||||||
|
func (l *discardLogger) Infof(format string, args ...interface{}) {}
|
||||||
|
func (l *discardLogger) Warning(args ...interface{}) {}
|
||||||
|
func (l *discardLogger) Warningln(args ...interface{}) {}
|
||||||
|
func (l *discardLogger) Warningf(format string, args ...interface{}) {}
|
||||||
|
func (l *discardLogger) Error(args ...interface{}) {}
|
||||||
|
func (l *discardLogger) Errorln(args ...interface{}) {}
|
||||||
|
func (l *discardLogger) Errorf(format string, args ...interface{}) {}
|
||||||
|
func (l *discardLogger) Fatal(args ...interface{}) { log.Fatal(args...) }
|
||||||
|
func (l *discardLogger) Fatalln(args ...interface{}) { log.Fatalln(args...) }
|
||||||
|
func (l *discardLogger) Fatalf(format string, args ...interface{}) { log.Fatalf(format, args...) }
|
||||||
|
func (l *discardLogger) V(lvl int) bool {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
func (l *discardLogger) Lvl(lvl int) grpclog.LoggerV2 { return l }
|
||||||
16
vendor/github.com/coreos/etcd/pkg/logutil/doc.go
generated
vendored
Normal file
16
vendor/github.com/coreos/etcd/pkg/logutil/doc.go
generated
vendored
Normal file
@@ -0,0 +1,16 @@
|
|||||||
|
// Copyright 2018 The etcd Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
// Package logutil includes utilities to facilitate logging.
|
||||||
|
package logutil
|
||||||
70
vendor/github.com/coreos/etcd/pkg/logutil/log_level.go
generated
vendored
Normal file
70
vendor/github.com/coreos/etcd/pkg/logutil/log_level.go
generated
vendored
Normal file
@@ -0,0 +1,70 @@
|
|||||||
|
// Copyright 2019 The etcd Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package logutil
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/coreos/pkg/capnslog"
|
||||||
|
"go.uber.org/zap"
|
||||||
|
"go.uber.org/zap/zapcore"
|
||||||
|
)
|
||||||
|
|
||||||
|
var DefaultLogLevel = "info"
|
||||||
|
|
||||||
|
// ConvertToZapLevel converts a log level string to zapcore.Level.
|
||||||
|
func ConvertToZapLevel(lvl string) zapcore.Level {
|
||||||
|
switch lvl {
|
||||||
|
case "debug":
|
||||||
|
return zap.DebugLevel
|
||||||
|
case "info":
|
||||||
|
return zap.InfoLevel
|
||||||
|
case "warn":
|
||||||
|
return zap.WarnLevel
|
||||||
|
case "error":
|
||||||
|
return zap.ErrorLevel
|
||||||
|
case "dpanic":
|
||||||
|
return zap.DPanicLevel
|
||||||
|
case "panic":
|
||||||
|
return zap.PanicLevel
|
||||||
|
case "fatal":
|
||||||
|
return zap.FatalLevel
|
||||||
|
default:
|
||||||
|
panic(fmt.Sprintf("unknown level %q", lvl))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ConvertToCapnslogLogLevel converts a log level string to capnslog.LogLevel.
|
||||||
|
// TODO: deprecate this in 3.5
|
||||||
|
func ConvertToCapnslogLogLevel(lvl string) capnslog.LogLevel {
|
||||||
|
switch lvl {
|
||||||
|
case "debug":
|
||||||
|
return capnslog.DEBUG
|
||||||
|
case "info":
|
||||||
|
return capnslog.INFO
|
||||||
|
case "warn":
|
||||||
|
return capnslog.WARNING
|
||||||
|
case "error":
|
||||||
|
return capnslog.ERROR
|
||||||
|
case "dpanic":
|
||||||
|
return capnslog.CRITICAL
|
||||||
|
case "panic":
|
||||||
|
return capnslog.CRITICAL
|
||||||
|
case "fatal":
|
||||||
|
return capnslog.CRITICAL
|
||||||
|
default:
|
||||||
|
panic(fmt.Sprintf("unknown level %q", lvl))
|
||||||
|
}
|
||||||
|
}
|
||||||
64
vendor/github.com/coreos/etcd/pkg/logutil/logger.go
generated
vendored
Normal file
64
vendor/github.com/coreos/etcd/pkg/logutil/logger.go
generated
vendored
Normal file
@@ -0,0 +1,64 @@
|
|||||||
|
// Copyright 2018 The etcd Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package logutil
|
||||||
|
|
||||||
|
import "google.golang.org/grpc/grpclog"
|
||||||
|
|
||||||
|
// Logger defines logging interface.
|
||||||
|
// TODO: deprecate in v3.5.
|
||||||
|
type Logger interface {
|
||||||
|
grpclog.LoggerV2
|
||||||
|
|
||||||
|
// Lvl returns the logger if the logger's verbosity level >= "lvl".
|
||||||
|
// Otherwise, it returns a logger that discards everything.
|
||||||
|
Lvl(lvl int) grpclog.LoggerV2
|
||||||
|
}
|
||||||
|
|
||||||
|
// assert that "defaultLogger" satisfy "Logger" interface
|
||||||
|
var _ Logger = &defaultLogger{}
|
||||||
|
|
||||||
|
// NewLogger wraps "grpclog.LoggerV2" that implements "Logger" interface.
|
||||||
|
//
|
||||||
|
// For example:
|
||||||
|
//
|
||||||
|
// var defaultLogger Logger
|
||||||
|
// g := grpclog.NewLoggerV2WithVerbosity(os.Stderr, os.Stderr, os.Stderr, 4)
|
||||||
|
// defaultLogger = NewLogger(g)
|
||||||
|
//
|
||||||
|
func NewLogger(g grpclog.LoggerV2) Logger { return &defaultLogger{g: g} }
|
||||||
|
|
||||||
|
type defaultLogger struct {
|
||||||
|
g grpclog.LoggerV2
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *defaultLogger) Info(args ...interface{}) { l.g.Info(args...) }
|
||||||
|
func (l *defaultLogger) Infoln(args ...interface{}) { l.g.Info(args...) }
|
||||||
|
func (l *defaultLogger) Infof(format string, args ...interface{}) { l.g.Infof(format, args...) }
|
||||||
|
func (l *defaultLogger) Warning(args ...interface{}) { l.g.Warning(args...) }
|
||||||
|
func (l *defaultLogger) Warningln(args ...interface{}) { l.g.Warning(args...) }
|
||||||
|
func (l *defaultLogger) Warningf(format string, args ...interface{}) { l.g.Warningf(format, args...) }
|
||||||
|
func (l *defaultLogger) Error(args ...interface{}) { l.g.Error(args...) }
|
||||||
|
func (l *defaultLogger) Errorln(args ...interface{}) { l.g.Error(args...) }
|
||||||
|
func (l *defaultLogger) Errorf(format string, args ...interface{}) { l.g.Errorf(format, args...) }
|
||||||
|
func (l *defaultLogger) Fatal(args ...interface{}) { l.g.Fatal(args...) }
|
||||||
|
func (l *defaultLogger) Fatalln(args ...interface{}) { l.g.Fatal(args...) }
|
||||||
|
func (l *defaultLogger) Fatalf(format string, args ...interface{}) { l.g.Fatalf(format, args...) }
|
||||||
|
func (l *defaultLogger) V(lvl int) bool { return l.g.V(lvl) }
|
||||||
|
func (l *defaultLogger) Lvl(lvl int) grpclog.LoggerV2 {
|
||||||
|
if l.g.V(lvl) {
|
||||||
|
return l
|
||||||
|
}
|
||||||
|
return &discardLogger{}
|
||||||
|
}
|
||||||
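The new logger.go above defines a Logger that embeds grpclog.LoggerV2 and adds Lvl, which returns the logger itself when the wrapped verbosity allows the requested level and the discarding logger otherwise. A small usage sketch, assuming the vendored import paths shown in this diff are available:

package main

import (
	"os"

	"github.com/coreos/etcd/pkg/logutil"
	"google.golang.org/grpc/grpclog"
)

func main() {
	// Build a grpclog.LoggerV2 with verbosity 4 and wrap it.
	g := grpclog.NewLoggerV2WithVerbosity(os.Stderr, os.Stderr, os.Stderr, 4)
	lg := logutil.NewLogger(g)

	lg.Info("always goes through the wrapped LoggerV2")

	// Lvl(5) exceeds the configured verbosity, so the discarding logger
	// is returned and the message is dropped.
	lg.Lvl(5).Info("dropped")
	lg.Lvl(2).Info("kept: verbosity 4 >= 2")
}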
1
vendor/github.com/coreos/etcd/pkg/logutil/merge_logger.go
generated
vendored
1
vendor/github.com/coreos/etcd/pkg/logutil/merge_logger.go
generated
vendored
@@ -12,7 +12,6 @@
|
|||||||
// See the License for the specific language governing permissions and
|
// See the License for the specific language governing permissions and
|
||||||
// limitations under the License.
|
// limitations under the License.
|
||||||
|
|
||||||
// Package logutil includes utilities to facilitate logging.
|
|
||||||
package logutil
|
package logutil
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
|||||||
60
vendor/github.com/coreos/etcd/pkg/logutil/package_logger.go
generated
vendored
Normal file
60
vendor/github.com/coreos/etcd/pkg/logutil/package_logger.go
generated
vendored
Normal file
@@ -0,0 +1,60 @@
|
|||||||
|
// Copyright 2018 The etcd Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package logutil
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/coreos/pkg/capnslog"
|
||||||
|
"google.golang.org/grpc/grpclog"
|
||||||
|
)
|
||||||
|
|
||||||
|
// assert that "packageLogger" satisfy "Logger" interface
|
||||||
|
var _ Logger = &packageLogger{}
|
||||||
|
|
||||||
|
// NewPackageLogger wraps "*capnslog.PackageLogger" that implements "Logger" interface.
|
||||||
|
//
|
||||||
|
// For example:
|
||||||
|
//
|
||||||
|
// var defaultLogger Logger
|
||||||
|
// defaultLogger = NewPackageLogger("github.com/coreos/etcd", "snapshot")
|
||||||
|
//
|
||||||
|
func NewPackageLogger(repo, pkg string) Logger {
|
||||||
|
return &packageLogger{p: capnslog.NewPackageLogger(repo, pkg)}
|
||||||
|
}
|
||||||
|
|
||||||
|
type packageLogger struct {
|
||||||
|
p *capnslog.PackageLogger
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *packageLogger) Info(args ...interface{}) { l.p.Info(args...) }
|
||||||
|
func (l *packageLogger) Infoln(args ...interface{}) { l.p.Info(args...) }
|
||||||
|
func (l *packageLogger) Infof(format string, args ...interface{}) { l.p.Infof(format, args...) }
|
||||||
|
func (l *packageLogger) Warning(args ...interface{}) { l.p.Warning(args...) }
|
||||||
|
func (l *packageLogger) Warningln(args ...interface{}) { l.p.Warning(args...) }
|
||||||
|
func (l *packageLogger) Warningf(format string, args ...interface{}) { l.p.Warningf(format, args...) }
|
||||||
|
func (l *packageLogger) Error(args ...interface{}) { l.p.Error(args...) }
|
||||||
|
func (l *packageLogger) Errorln(args ...interface{}) { l.p.Error(args...) }
|
||||||
|
func (l *packageLogger) Errorf(format string, args ...interface{}) { l.p.Errorf(format, args...) }
|
||||||
|
func (l *packageLogger) Fatal(args ...interface{}) { l.p.Fatal(args...) }
|
||||||
|
func (l *packageLogger) Fatalln(args ...interface{}) { l.p.Fatal(args...) }
|
||||||
|
func (l *packageLogger) Fatalf(format string, args ...interface{}) { l.p.Fatalf(format, args...) }
|
||||||
|
func (l *packageLogger) V(lvl int) bool {
|
||||||
|
return l.p.LevelAt(capnslog.LogLevel(lvl))
|
||||||
|
}
|
||||||
|
func (l *packageLogger) Lvl(lvl int) grpclog.LoggerV2 {
|
||||||
|
if l.p.LevelAt(capnslog.LogLevel(lvl)) {
|
||||||
|
return l
|
||||||
|
}
|
||||||
|
return &discardLogger{}
|
||||||
|
}
|
||||||
97
vendor/github.com/coreos/etcd/pkg/logutil/zap.go
generated
vendored
Normal file
97
vendor/github.com/coreos/etcd/pkg/logutil/zap.go
generated
vendored
Normal file
@@ -0,0 +1,97 @@
|
|||||||
|
// Copyright 2019 The etcd Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package logutil
|
||||||
|
|
||||||
|
import (
|
||||||
|
"sort"
|
||||||
|
|
||||||
|
"go.uber.org/zap"
|
||||||
|
"go.uber.org/zap/zapcore"
|
||||||
|
)
|
||||||
|
|
||||||
|
// DefaultZapLoggerConfig defines default zap logger configuration.
|
||||||
|
var DefaultZapLoggerConfig = zap.Config{
|
||||||
|
Level: zap.NewAtomicLevelAt(ConvertToZapLevel(DefaultLogLevel)),
|
||||||
|
|
||||||
|
Development: false,
|
||||||
|
Sampling: &zap.SamplingConfig{
|
||||||
|
Initial: 100,
|
||||||
|
Thereafter: 100,
|
||||||
|
},
|
||||||
|
|
||||||
|
Encoding: "json",
|
||||||
|
|
||||||
|
// copied from "zap.NewProductionEncoderConfig" with some updates
|
||||||
|
EncoderConfig: zapcore.EncoderConfig{
|
||||||
|
TimeKey: "ts",
|
||||||
|
LevelKey: "level",
|
||||||
|
NameKey: "logger",
|
||||||
|
CallerKey: "caller",
|
||||||
|
MessageKey: "msg",
|
||||||
|
StacktraceKey: "stacktrace",
|
||||||
|
LineEnding: zapcore.DefaultLineEnding,
|
||||||
|
EncodeLevel: zapcore.LowercaseLevelEncoder,
|
||||||
|
EncodeTime: zapcore.ISO8601TimeEncoder,
|
||||||
|
EncodeDuration: zapcore.StringDurationEncoder,
|
||||||
|
EncodeCaller: zapcore.ShortCallerEncoder,
|
||||||
|
},
|
||||||
|
|
||||||
|
// Use "/dev/null" to discard all
|
||||||
|
OutputPaths: []string{"stderr"},
|
||||||
|
ErrorOutputPaths: []string{"stderr"},
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddOutputPaths adds output paths to the existing output paths, resolving conflicts.
|
||||||
|
func AddOutputPaths(cfg zap.Config, outputPaths, errorOutputPaths []string) zap.Config {
|
||||||
|
outputs := make(map[string]struct{})
|
||||||
|
for _, v := range cfg.OutputPaths {
|
||||||
|
outputs[v] = struct{}{}
|
||||||
|
}
|
||||||
|
for _, v := range outputPaths {
|
||||||
|
outputs[v] = struct{}{}
|
||||||
|
}
|
||||||
|
outputSlice := make([]string, 0)
|
||||||
|
if _, ok := outputs["/dev/null"]; ok {
|
||||||
|
// "/dev/null" to discard all
|
||||||
|
outputSlice = []string{"/dev/null"}
|
||||||
|
} else {
|
||||||
|
for k := range outputs {
|
||||||
|
outputSlice = append(outputSlice, k)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
cfg.OutputPaths = outputSlice
|
||||||
|
sort.Strings(cfg.OutputPaths)
|
||||||
|
|
||||||
|
errOutputs := make(map[string]struct{})
|
||||||
|
for _, v := range cfg.ErrorOutputPaths {
|
||||||
|
errOutputs[v] = struct{}{}
|
||||||
|
}
|
||||||
|
for _, v := range errorOutputPaths {
|
||||||
|
errOutputs[v] = struct{}{}
|
||||||
|
}
|
||||||
|
errOutputSlice := make([]string, 0)
|
||||||
|
if _, ok := errOutputs["/dev/null"]; ok {
|
||||||
|
// "/dev/null" to discard all
|
||||||
|
errOutputSlice = []string{"/dev/null"}
|
||||||
|
} else {
|
||||||
|
for k := range errOutputs {
|
||||||
|
errOutputSlice = append(errOutputSlice, k)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
cfg.ErrorOutputPaths = errOutputSlice
|
||||||
|
sort.Strings(cfg.ErrorOutputPaths)
|
||||||
|
|
||||||
|
return cfg
|
||||||
|
}
|
||||||
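DefaultZapLoggerConfig above is a plain zap.Config, so callers extend it by value and build the logger themselves; AddOutputPaths merges and sorts the paths and collapses everything to /dev/null when that sink is requested. A short sketch under those assumptions; the /tmp/etcd.log path is illustrative only:

package main

import (
	"github.com/coreos/etcd/pkg/logutil"
)

func main() {
	// Start from the default JSON config and add a file output on top of stderr.
	cfg := logutil.DefaultZapLoggerConfig
	cfg = logutil.AddOutputPaths(cfg, []string{"/tmp/etcd.log"}, nil)

	lg, err := cfg.Build()
	if err != nil {
		panic(err)
	}
	defer lg.Sync()

	lg.Info("zap logger writes to both stderr and /tmp/etcd.log")
}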
111
vendor/github.com/coreos/etcd/pkg/logutil/zap_grpc.go
generated
vendored
Normal file
111
vendor/github.com/coreos/etcd/pkg/logutil/zap_grpc.go
generated
vendored
Normal file
@@ -0,0 +1,111 @@
|
|||||||
|
// Copyright 2018 The etcd Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package logutil
|
||||||
|
|
||||||
|
import (
|
||||||
|
"go.uber.org/zap"
|
||||||
|
"go.uber.org/zap/zapcore"
|
||||||
|
"google.golang.org/grpc/grpclog"
|
||||||
|
)
|
||||||
|
|
||||||
|
// NewGRPCLoggerV2 converts "*zap.Logger" to "grpclog.LoggerV2".
|
||||||
|
// It discards all INFO level logging in gRPC, if debug level
|
||||||
|
// is not enabled in "*zap.Logger".
|
||||||
|
func NewGRPCLoggerV2(lcfg zap.Config) (grpclog.LoggerV2, error) {
|
||||||
|
lg, err := lcfg.Build(zap.AddCallerSkip(1)) // to annotate caller outside of "logutil"
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &zapGRPCLogger{lg: lg, sugar: lg.Sugar()}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewGRPCLoggerV2FromZapCore creates "grpclog.LoggerV2" from "zap.Core"
|
||||||
|
// and "zapcore.WriteSyncer". It discards all INFO level logging in gRPC,
|
||||||
|
// if debug level is not enabled in "*zap.Logger".
|
||||||
|
func NewGRPCLoggerV2FromZapCore(cr zapcore.Core, syncer zapcore.WriteSyncer) grpclog.LoggerV2 {
|
||||||
|
// "AddCallerSkip" to annotate caller outside of "logutil"
|
||||||
|
lg := zap.New(cr, zap.AddCaller(), zap.AddCallerSkip(1), zap.ErrorOutput(syncer))
|
||||||
|
return &zapGRPCLogger{lg: lg, sugar: lg.Sugar()}
|
||||||
|
}
|
||||||
|
|
||||||
|
type zapGRPCLogger struct {
|
||||||
|
lg *zap.Logger
|
||||||
|
sugar *zap.SugaredLogger
|
||||||
|
}
|
||||||
|
|
||||||
|
func (zl *zapGRPCLogger) Info(args ...interface{}) {
|
||||||
|
if !zl.lg.Core().Enabled(zapcore.DebugLevel) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
zl.sugar.Info(args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (zl *zapGRPCLogger) Infoln(args ...interface{}) {
|
||||||
|
if !zl.lg.Core().Enabled(zapcore.DebugLevel) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
zl.sugar.Info(args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (zl *zapGRPCLogger) Infof(format string, args ...interface{}) {
|
||||||
|
if !zl.lg.Core().Enabled(zapcore.DebugLevel) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
zl.sugar.Infof(format, args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (zl *zapGRPCLogger) Warning(args ...interface{}) {
|
||||||
|
zl.sugar.Warn(args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (zl *zapGRPCLogger) Warningln(args ...interface{}) {
|
||||||
|
zl.sugar.Warn(args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (zl *zapGRPCLogger) Warningf(format string, args ...interface{}) {
|
||||||
|
zl.sugar.Warnf(format, args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (zl *zapGRPCLogger) Error(args ...interface{}) {
|
||||||
|
zl.sugar.Error(args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (zl *zapGRPCLogger) Errorln(args ...interface{}) {
|
||||||
|
zl.sugar.Error(args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (zl *zapGRPCLogger) Errorf(format string, args ...interface{}) {
|
||||||
|
zl.sugar.Errorf(format, args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (zl *zapGRPCLogger) Fatal(args ...interface{}) {
|
||||||
|
zl.sugar.Fatal(args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (zl *zapGRPCLogger) Fatalln(args ...interface{}) {
|
||||||
|
zl.sugar.Fatal(args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (zl *zapGRPCLogger) Fatalf(format string, args ...interface{}) {
|
||||||
|
zl.sugar.Fatalf(format, args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (zl *zapGRPCLogger) V(l int) bool {
|
||||||
|
// infoLog == 0
|
||||||
|
if l <= 0 { // debug level, then we ignore info level in gRPC
|
||||||
|
return !zl.lg.Core().Enabled(zapcore.DebugLevel)
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
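NewGRPCLoggerV2 above builds a zap logger from the given config and suppresses gRPC INFO output unless the zap debug level is enabled. A hedged sketch of wiring it into gRPC's global logger, assuming the vendored import paths:

package main

import (
	"github.com/coreos/etcd/pkg/logutil"
	"google.golang.org/grpc/grpclog"
)

func main() {
	lcfg := logutil.DefaultZapLoggerConfig // "info" level by default
	gl, err := logutil.NewGRPCLoggerV2(lcfg)
	if err != nil {
		panic(err)
	}
	grpclog.SetLoggerV2(gl)

	grpclog.Info("discarded: debug is not enabled in the zap config")
	grpclog.Warning("kept: warnings always pass through")
}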
92
vendor/github.com/coreos/etcd/pkg/logutil/zap_journal.go
generated
vendored
Normal file
92
vendor/github.com/coreos/etcd/pkg/logutil/zap_journal.go
generated
vendored
Normal file
@@ -0,0 +1,92 @@
|
|||||||
|
// Copyright 2018 The etcd Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
// +build !windows
|
||||||
|
|
||||||
|
package logutil
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
|
||||||
|
"github.com/coreos/etcd/pkg/systemd"
|
||||||
|
|
||||||
|
"github.com/coreos/go-systemd/journal"
|
||||||
|
"go.uber.org/zap/zapcore"
|
||||||
|
)
|
||||||
|
|
||||||
|
// NewJournalWriter wraps "io.Writer" to redirect log output
|
||||||
|
// to the local systemd journal. If the journald send fails, it falls
|
||||||
|
// back to writing to the original writer.
|
||||||
|
// The decode overhead is only <30µs per write.
|
||||||
|
// Reference: https://github.com/coreos/pkg/blob/master/capnslog/journald_formatter.go
|
||||||
|
func NewJournalWriter(wr io.Writer) (io.Writer, error) {
|
||||||
|
return &journalWriter{Writer: wr}, systemd.DialJournal()
|
||||||
|
}
|
||||||
|
|
||||||
|
type journalWriter struct {
|
||||||
|
io.Writer
|
||||||
|
}
|
||||||
|
|
||||||
|
// WARN: assume that etcd uses default field names in zap encoder config
|
||||||
|
// make sure to keep this up-to-date!
|
||||||
|
type logLine struct {
|
||||||
|
Level string `json:"level"`
|
||||||
|
Caller string `json:"caller"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *journalWriter) Write(p []byte) (int, error) {
|
||||||
|
line := &logLine{}
|
||||||
|
if err := json.NewDecoder(bytes.NewReader(p)).Decode(line); err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
|
||||||
|
var pri journal.Priority
|
||||||
|
switch line.Level {
|
||||||
|
case zapcore.DebugLevel.String():
|
||||||
|
pri = journal.PriDebug
|
||||||
|
case zapcore.InfoLevel.String():
|
||||||
|
pri = journal.PriInfo
|
||||||
|
|
||||||
|
case zapcore.WarnLevel.String():
|
||||||
|
pri = journal.PriWarning
|
||||||
|
case zapcore.ErrorLevel.String():
|
||||||
|
pri = journal.PriErr
|
||||||
|
|
||||||
|
case zapcore.DPanicLevel.String():
|
||||||
|
pri = journal.PriCrit
|
||||||
|
case zapcore.PanicLevel.String():
|
||||||
|
pri = journal.PriCrit
|
||||||
|
case zapcore.FatalLevel.String():
|
||||||
|
pri = journal.PriCrit
|
||||||
|
|
||||||
|
default:
|
||||||
|
panic(fmt.Errorf("unknown log level: %q", line.Level))
|
||||||
|
}
|
||||||
|
|
||||||
|
err := journal.Send(string(p), pri, map[string]string{
|
||||||
|
"PACKAGE": filepath.Dir(line.Caller),
|
||||||
|
"SYSLOG_IDENTIFIER": filepath.Base(os.Args[0]),
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
// "journal" also falls back to stderr
|
||||||
|
// "fmt.Fprintln(os.Stderr, s)"
|
||||||
|
return w.Writer.Write(p)
|
||||||
|
}
|
||||||
|
return 0, nil
|
||||||
|
}
|
||||||
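The journal writer above decodes each JSON log line to pick a journald priority, so it only works with an encoder that keeps the default "level" and "caller" keys, as the WARN comment notes. A sketch of plugging it into a zap core on a host where journald is reachable; treat it as an assumption-laden example, not etcd's own wiring:

package main

import (
	"os"

	"github.com/coreos/etcd/pkg/logutil"
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	// The writer falls back to the wrapped writer (stderr here) if a send fails.
	jw, err := logutil.NewJournalWriter(os.Stderr)
	if err != nil {
		panic(err) // journald socket not reachable
	}

	// Keep the default field names so the writer can find "level" and "caller".
	enc := zapcore.NewJSONEncoder(logutil.DefaultZapLoggerConfig.EncoderConfig)
	core := zapcore.NewCore(enc, zapcore.AddSync(jw), zap.InfoLevel)
	lg := zap.New(core, zap.AddCaller())

	lg.Info("this line is routed to the systemd journal")
}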
102
vendor/github.com/coreos/etcd/pkg/logutil/zap_raft.go
generated
vendored
Normal file
102
vendor/github.com/coreos/etcd/pkg/logutil/zap_raft.go
generated
vendored
Normal file
@@ -0,0 +1,102 @@
|
|||||||
|
// Copyright 2018 The etcd Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package logutil
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
|
||||||
|
"github.com/coreos/etcd/raft"
|
||||||
|
|
||||||
|
"go.uber.org/zap"
|
||||||
|
"go.uber.org/zap/zapcore"
|
||||||
|
)
|
||||||
|
|
||||||
|
// NewRaftLogger builds "raft.Logger" from "*zap.Config".
|
||||||
|
func NewRaftLogger(lcfg *zap.Config) (raft.Logger, error) {
|
||||||
|
if lcfg == nil {
|
||||||
|
return nil, errors.New("nil zap.Config")
|
||||||
|
}
|
||||||
|
lg, err := lcfg.Build(zap.AddCallerSkip(1)) // to annotate caller outside of "logutil"
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &zapRaftLogger{lg: lg, sugar: lg.Sugar()}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewRaftLoggerZap converts "*zap.Logger" to "raft.Logger".
|
||||||
|
func NewRaftLoggerZap(lg *zap.Logger) raft.Logger {
|
||||||
|
return &zapRaftLogger{lg: lg, sugar: lg.Sugar()}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewRaftLoggerFromZapCore creates "raft.Logger" from "zap.Core"
|
||||||
|
// and "zapcore.WriteSyncer".
|
||||||
|
func NewRaftLoggerFromZapCore(cr zapcore.Core, syncer zapcore.WriteSyncer) raft.Logger {
|
||||||
|
// "AddCallerSkip" to annotate caller outside of "logutil"
|
||||||
|
lg := zap.New(cr, zap.AddCaller(), zap.AddCallerSkip(1), zap.ErrorOutput(syncer))
|
||||||
|
return &zapRaftLogger{lg: lg, sugar: lg.Sugar()}
|
||||||
|
}
|
||||||
|
|
||||||
|
type zapRaftLogger struct {
|
||||||
|
lg *zap.Logger
|
||||||
|
sugar *zap.SugaredLogger
|
||||||
|
}
|
||||||
|
|
||||||
|
func (zl *zapRaftLogger) Debug(args ...interface{}) {
|
||||||
|
zl.sugar.Debug(args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (zl *zapRaftLogger) Debugf(format string, args ...interface{}) {
|
||||||
|
zl.sugar.Debugf(format, args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (zl *zapRaftLogger) Error(args ...interface{}) {
|
||||||
|
zl.sugar.Error(args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (zl *zapRaftLogger) Errorf(format string, args ...interface{}) {
|
||||||
|
zl.sugar.Errorf(format, args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (zl *zapRaftLogger) Info(args ...interface{}) {
|
||||||
|
zl.sugar.Info(args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (zl *zapRaftLogger) Infof(format string, args ...interface{}) {
|
||||||
|
zl.sugar.Infof(format, args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (zl *zapRaftLogger) Warning(args ...interface{}) {
|
||||||
|
zl.sugar.Warn(args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (zl *zapRaftLogger) Warningf(format string, args ...interface{}) {
|
||||||
|
zl.sugar.Warnf(format, args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (zl *zapRaftLogger) Fatal(args ...interface{}) {
|
||||||
|
zl.sugar.Fatal(args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (zl *zapRaftLogger) Fatalf(format string, args ...interface{}) {
|
||||||
|
zl.sugar.Fatalf(format, args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (zl *zapRaftLogger) Panic(args ...interface{}) {
|
||||||
|
zl.sugar.Panic(args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (zl *zapRaftLogger) Panicf(format string, args ...interface{}) {
|
||||||
|
zl.sugar.Panicf(format, args...)
|
||||||
|
}
|
||||||
26
vendor/github.com/coreos/etcd/pkg/systemd/BUILD
generated
vendored
Normal file
26
vendor/github.com/coreos/etcd/pkg/systemd/BUILD
generated
vendored
Normal file
@@ -0,0 +1,26 @@
|
|||||||
|
load("@io_bazel_rules_go//go:def.bzl", "go_library")
|
||||||
|
|
||||||
|
go_library(
|
||||||
|
name = "go_default_library",
|
||||||
|
srcs = [
|
||||||
|
"doc.go",
|
||||||
|
"journal.go",
|
||||||
|
],
|
||||||
|
importmap = "k8s.io/kubernetes/vendor/github.com/coreos/etcd/pkg/systemd",
|
||||||
|
importpath = "github.com/coreos/etcd/pkg/systemd",
|
||||||
|
visibility = ["//visibility:public"],
|
||||||
|
)
|
||||||
|
|
||||||
|
filegroup(
|
||||||
|
name = "package-srcs",
|
||||||
|
srcs = glob(["**"]),
|
||||||
|
tags = ["automanaged"],
|
||||||
|
visibility = ["//visibility:private"],
|
||||||
|
)
|
||||||
|
|
||||||
|
filegroup(
|
||||||
|
name = "all-srcs",
|
||||||
|
srcs = [":package-srcs"],
|
||||||
|
tags = ["automanaged"],
|
||||||
|
visibility = ["//visibility:public"],
|
||||||
|
)
|
||||||
16
vendor/github.com/coreos/etcd/pkg/systemd/doc.go
generated
vendored
Normal file
16
vendor/github.com/coreos/etcd/pkg/systemd/doc.go
generated
vendored
Normal file
@@ -0,0 +1,16 @@
|
|||||||
|
// Copyright 2018 The etcd Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
// Package systemd provides utility functions for systemd.
|
||||||
|
package systemd
|
||||||
29
vendor/github.com/coreos/etcd/pkg/systemd/journal.go
generated
vendored
Normal file
29
vendor/github.com/coreos/etcd/pkg/systemd/journal.go
generated
vendored
Normal file
@@ -0,0 +1,29 @@
|
|||||||
|
// Copyright 2018 The etcd Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package systemd
|
||||||
|
|
||||||
|
import "net"
|
||||||
|
|
||||||
|
// DialJournal returns no error if the process can dial the journal socket.
|
||||||
|
// Returns an error if the dial failed, which indicates journald is not available
|
||||||
|
// (e.g. run embedded etcd as docker daemon).
|
||||||
|
// Reference: https://github.com/coreos/go-systemd/blob/master/journal/journal.go.
|
||||||
|
func DialJournal() error {
|
||||||
|
conn, err := net.Dial("unixgram", "/run/systemd/journal/socket")
|
||||||
|
if conn != nil {
|
||||||
|
defer conn.Close()
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
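DialJournal above is just a connectivity probe for /run/systemd/journal/socket. A tiny sketch of using it to decide at startup whether journal output can be enabled, assuming the vendored import path:

package main

import (
	"fmt"

	"github.com/coreos/etcd/pkg/systemd"
)

func main() {
	if err := systemd.DialJournal(); err != nil {
		fmt.Println("journald unavailable, logging to stderr only:", err)
		return
	}
	fmt.Println("journald socket reachable; journal output can be enabled")
}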
1
vendor/github.com/coreos/etcd/pkg/testutil/BUILD
generated
vendored
1
vendor/github.com/coreos/etcd/pkg/testutil/BUILD
generated
vendored
@@ -8,6 +8,7 @@ go_library(
|
|||||||
"pauseable_handler.go",
|
"pauseable_handler.go",
|
||||||
"recorder.go",
|
"recorder.go",
|
||||||
"testutil.go",
|
"testutil.go",
|
||||||
|
"var.go",
|
||||||
],
|
],
|
||||||
importmap = "k8s.io/kubernetes/vendor/github.com/coreos/etcd/pkg/testutil",
|
importmap = "k8s.io/kubernetes/vendor/github.com/coreos/etcd/pkg/testutil",
|
||||||
importpath = "github.com/coreos/etcd/pkg/testutil",
|
importpath = "github.com/coreos/etcd/pkg/testutil",
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
// Copyright 2017 The etcd Authors
|
// Copyright 2018 The etcd Authors
|
||||||
//
|
//
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
// you may not use this file except in compliance with the License.
|
// you may not use this file except in compliance with the License.
|
||||||
@@ -12,19 +12,11 @@
|
|||||||
// See the License for the specific language governing permissions and
|
// See the License for the specific language governing permissions and
|
||||||
// limitations under the License.
|
// limitations under the License.
|
||||||
|
|
||||||
package clientv3
|
package testutil
|
||||||
|
|
||||||
import "context"
|
import "time"
|
||||||
|
|
||||||
// TODO: remove this when "FailFast=false" is fixed.
|
var (
|
||||||
// See https://github.com/grpc/grpc-go/issues/1532.
|
ApplyTimeout = time.Second
|
||||||
func readyWait(rpcCtx, clientCtx context.Context, ready <-chan struct{}) error {
|
RequestTimeout = 3 * time.Second
|
||||||
select {
|
)
|
||||||
case <-ready:
|
|
||||||
return nil
|
|
||||||
case <-rpcCtx.Done():
|
|
||||||
return rpcCtx.Err()
|
|
||||||
case <-clientCtx.Done():
|
|
||||||
return clientCtx.Err()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
1
vendor/github.com/coreos/etcd/proxy/grpcproxy/cache/store.go
generated
vendored
1
vendor/github.com/coreos/etcd/proxy/grpcproxy/cache/store.go
generated
vendored
@@ -53,6 +53,7 @@ func keyFunc(req *pb.RangeRequest) string {
|
|||||||
func NewCache(maxCacheEntries int) Cache {
|
func NewCache(maxCacheEntries int) Cache {
|
||||||
return &cache{
|
return &cache{
|
||||||
lru: lru.New(maxCacheEntries),
|
lru: lru.New(maxCacheEntries),
|
||||||
|
cachedRanges: adt.NewIntervalTree(),
|
||||||
compactedRev: -1,
|
compactedRev: -1,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
2
vendor/github.com/coreos/etcd/raft/logger.go
generated
vendored
2
vendor/github.com/coreos/etcd/raft/logger.go
generated
vendored
@@ -114,7 +114,7 @@ func (l *DefaultLogger) Fatalf(format string, v ...interface{}) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (l *DefaultLogger) Panic(v ...interface{}) {
|
func (l *DefaultLogger) Panic(v ...interface{}) {
|
||||||
l.Logger.Panic(v)
|
l.Logger.Panic(v...)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (l *DefaultLogger) Panicf(format string, v ...interface{}) {
|
func (l *DefaultLogger) Panicf(format string, v ...interface{}) {
|
||||||
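The one-character fix above (Panic(v) becomes Panic(v...)) matters because passing the slice itself forwards it as a single argument, so the logged line shows a bracketed slice instead of the original arguments. A minimal illustration, independent of the raft package:

package main

import "fmt"

// forwardWrong passes the whole []interface{} as one argument.
func forwardWrong(v ...interface{}) string { return fmt.Sprint(v) }

// forwardRight spreads the elements, preserving the caller's arguments.
func forwardRight(v ...interface{}) string { return fmt.Sprint(v...) }

func main() {
	fmt.Println(forwardWrong("id=", 42)) // "[id= 42]": the slice printed as one value
	fmt.Println(forwardRight("id=", 42)) // "id=42"
}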
|
|||||||
1
vendor/github.com/coreos/etcd/raft/raft.go
generated
vendored
1
vendor/github.com/coreos/etcd/raft/raft.go
generated
vendored
@@ -663,6 +663,7 @@ func (r *raft) becomePreCandidate() {
|
|||||||
r.step = stepCandidate
|
r.step = stepCandidate
|
||||||
r.votes = make(map[uint64]bool)
|
r.votes = make(map[uint64]bool)
|
||||||
r.tick = r.tickElection
|
r.tick = r.tickElection
|
||||||
|
r.lead = None
|
||||||
r.state = StatePreCandidate
|
r.state = StatePreCandidate
|
||||||
r.logger.Infof("%x became pre-candidate at term %d", r.id, r.Term)
|
r.logger.Infof("%x became pre-candidate at term %d", r.id, r.Term)
|
||||||
}
|
}
|
||||||
|
|||||||
1073
vendor/github.com/coreos/etcd/raft/raftpb/raft.pb.go
generated
vendored
1073
vendor/github.com/coreos/etcd/raft/raftpb/raft.pb.go
generated
vendored
File diff suppressed because it is too large
Load Diff
4
vendor/github.com/coreos/etcd/rafthttp/http.go
generated
vendored
4
vendor/github.com/coreos/etcd/rafthttp/http.go
generated
vendored
@@ -203,6 +203,10 @@ func (h *snapshotHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
snapshotReceiveInflights.WithLabelValues(from).Inc()
|
||||||
|
defer func() {
|
||||||
|
snapshotReceiveInflights.WithLabelValues(from).Dec()
|
||||||
|
}()
|
||||||
plog.Infof("receiving database snapshot [index:%d, from %s] ...", m.Snapshot.Metadata.Index, types.ID(m.From))
|
plog.Infof("receiving database snapshot [index:%d, from %s] ...", m.Snapshot.Metadata.Index, types.ID(m.From))
|
||||||
// save incoming database snapshot.
|
// save incoming database snapshot.
|
||||||
n, err := h.snapshotter.SaveDBFrom(r.Body, m.Snapshot.Metadata.Index)
|
n, err := h.snapshotter.SaveDBFrom(r.Body, m.Snapshot.Metadata.Index)
|
||||||
|
|||||||
20
vendor/github.com/coreos/etcd/rafthttp/metrics.go
generated
vendored
20
vendor/github.com/coreos/etcd/rafthttp/metrics.go
generated
vendored
@@ -62,6 +62,15 @@ var (
|
|||||||
[]string{"To"},
|
[]string{"To"},
|
||||||
)
|
)
|
||||||
|
|
||||||
|
snapshotSendInflights = prometheus.NewGaugeVec(prometheus.GaugeOpts{
|
||||||
|
Namespace: "etcd",
|
||||||
|
Subsystem: "network",
|
||||||
|
Name: "snapshot_send_inflights_total",
|
||||||
|
Help: "Total number of inflight snapshot sends",
|
||||||
|
},
|
||||||
|
[]string{"To"},
|
||||||
|
)
|
||||||
|
|
||||||
snapshotSendFailures = prometheus.NewCounterVec(prometheus.CounterOpts{
|
snapshotSendFailures = prometheus.NewCounterVec(prometheus.CounterOpts{
|
||||||
Namespace: "etcd",
|
Namespace: "etcd",
|
||||||
Subsystem: "network",
|
Subsystem: "network",
|
||||||
@@ -93,6 +102,15 @@ var (
|
|||||||
[]string{"From"},
|
[]string{"From"},
|
||||||
)
|
)
|
||||||
|
|
||||||
|
snapshotReceiveInflights = prometheus.NewGaugeVec(prometheus.GaugeOpts{
|
||||||
|
Namespace: "etcd",
|
||||||
|
Subsystem: "network",
|
||||||
|
Name: "snapshot_receive_inflights_total",
|
||||||
|
Help: "Total number of inflight snapshot receives",
|
||||||
|
},
|
||||||
|
[]string{"From"},
|
||||||
|
)
|
||||||
|
|
||||||
snapshotReceiveFailures = prometheus.NewCounterVec(prometheus.CounterOpts{
|
snapshotReceiveFailures = prometheus.NewCounterVec(prometheus.CounterOpts{
|
||||||
Namespace: "etcd",
|
Namespace: "etcd",
|
||||||
Subsystem: "network",
|
Subsystem: "network",
|
||||||
@@ -133,9 +151,11 @@ func init() {
|
|||||||
prometheus.MustRegister(recvFailures)
|
prometheus.MustRegister(recvFailures)
|
||||||
|
|
||||||
prometheus.MustRegister(snapshotSend)
|
prometheus.MustRegister(snapshotSend)
|
||||||
|
prometheus.MustRegister(snapshotSendInflights)
|
||||||
prometheus.MustRegister(snapshotSendFailures)
|
prometheus.MustRegister(snapshotSendFailures)
|
||||||
prometheus.MustRegister(snapshotSendSeconds)
|
prometheus.MustRegister(snapshotSendSeconds)
|
||||||
prometheus.MustRegister(snapshotReceive)
|
prometheus.MustRegister(snapshotReceive)
|
||||||
|
prometheus.MustRegister(snapshotReceiveInflights)
|
||||||
prometheus.MustRegister(snapshotReceiveFailures)
|
prometheus.MustRegister(snapshotReceiveFailures)
|
||||||
prometheus.MustRegister(snapshotReceiveSeconds)
|
prometheus.MustRegister(snapshotReceiveSeconds)
|
||||||
|
|
||||||
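The new gauges above follow the usual inflight pattern: increment with the peer label when a snapshot transfer starts and decrement on every exit path via defer. A self-contained sketch of the same pattern; the metric and label names mirror the ones above, but the surrounding code is illustrative, not etcd's:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

var inflight = prometheus.NewGaugeVec(prometheus.GaugeOpts{
	Namespace: "demo",
	Subsystem: "network",
	Name:      "snapshot_send_inflights_total",
	Help:      "Total number of inflight snapshot sends",
}, []string{"To"})

func init() { prometheus.MustRegister(inflight) }

func sendSnapshot(to string) {
	inflight.WithLabelValues(to).Inc()
	defer inflight.WithLabelValues(to).Dec() // decremented on every return path
	// ... send the snapshot ...
	fmt.Println("sending snapshot to", to)
}

func main() { sendSnapshot("node-2") }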
|
|||||||
4
vendor/github.com/coreos/etcd/rafthttp/snapshot_sender.go
generated
vendored
4
vendor/github.com/coreos/etcd/rafthttp/snapshot_sender.go
generated
vendored
@@ -76,6 +76,10 @@ func (s *snapshotSender) send(merged snap.Message) {
|
|||||||
req := createPostRequest(u, RaftSnapshotPrefix, body, "application/octet-stream", s.tr.URLs, s.from, s.cid)
|
req := createPostRequest(u, RaftSnapshotPrefix, body, "application/octet-stream", s.tr.URLs, s.from, s.cid)
|
||||||
|
|
||||||
plog.Infof("start to send database snapshot [index: %d, to %s]...", m.Snapshot.Metadata.Index, types.ID(m.To))
|
plog.Infof("start to send database snapshot [index: %d, to %s]...", m.Snapshot.Metadata.Index, types.ID(m.To))
|
||||||
|
snapshotSendInflights.WithLabelValues(to).Inc()
|
||||||
|
defer func() {
|
||||||
|
snapshotSendInflights.WithLabelValues(to).Dec()
|
||||||
|
}()
|
||||||
|
|
||||||
err := s.post(req)
|
err := s.post(req)
|
||||||
defer merged.CloseWithError(err)
|
defer merged.CloseWithError(err)
|
||||||
|
|||||||
4
vendor/github.com/coreos/etcd/rafthttp/transport.go
generated
vendored
4
vendor/github.com/coreos/etcd/rafthttp/transport.go
generated
vendored
@@ -372,12 +372,16 @@ type Pausable interface {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (t *Transport) Pause() {
|
func (t *Transport) Pause() {
|
||||||
|
t.mu.RLock()
|
||||||
|
defer t.mu.RUnlock()
|
||||||
for _, p := range t.peers {
|
for _, p := range t.peers {
|
||||||
p.(Pausable).Pause()
|
p.(Pausable).Pause()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (t *Transport) Resume() {
|
func (t *Transport) Resume() {
|
||||||
|
t.mu.RLock()
|
||||||
|
defer t.mu.RUnlock()
|
||||||
for _, p := range t.peers {
|
for _, p := range t.peers {
|
||||||
p.(Pausable).Resume()
|
p.(Pausable).Resume()
|
||||||
}
|
}
|
||||||
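Pause and Resume above now take the transport's read lock while ranging over the peers map, so they cannot race with peer addition or removal, which takes the write lock; the peers themselves synchronize internally when paused or resumed. A stripped-down sketch of that locking shape, with illustrative types:

package main

import (
	"fmt"
	"sync"
)

type peer struct{ paused bool }

func (p *peer) Pause() { p.paused = true } // the real peer type locks internally

type transport struct {
	mu    sync.RWMutex
	peers map[string]*peer
}

// Pause only reads the map, so the read lock is enough; writers of the map
// (add/remove peer) would take the write lock.
func (t *transport) Pause() {
	t.mu.RLock()
	defer t.mu.RUnlock()
	for _, p := range t.peers {
		p.Pause()
	}
}

func main() {
	t := &transport{peers: map[string]*peer{"a": {}, "b": {}}}
	t.Pause()
	fmt.Println("paused:", t.peers["a"].paused, t.peers["b"].paused)
}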
|
|||||||
161
vendor/github.com/coreos/etcd/snap/snappb/snap.pb.go
generated
vendored
161
vendor/github.com/coreos/etcd/snap/snappb/snap.pb.go
generated
vendored
@@ -1,27 +1,16 @@
|
|||||||
// Code generated by protoc-gen-gogo. DO NOT EDIT.
|
// Code generated by protoc-gen-gogo. DO NOT EDIT.
|
||||||
// source: snap.proto
|
// source: snap.proto
|
||||||
|
|
||||||
/*
|
|
||||||
Package snappb is a generated protocol buffer package.
|
|
||||||
|
|
||||||
It is generated from these files:
|
|
||||||
snap.proto
|
|
||||||
|
|
||||||
It has these top-level messages:
|
|
||||||
Snapshot
|
|
||||||
*/
|
|
||||||
package snappb
|
package snappb
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
fmt "fmt"
|
||||||
|
io "io"
|
||||||
proto "github.com/golang/protobuf/proto"
|
|
||||||
|
|
||||||
math "math"
|
math "math"
|
||||||
|
math_bits "math/bits"
|
||||||
|
|
||||||
_ "github.com/gogo/protobuf/gogoproto"
|
_ "github.com/gogo/protobuf/gogoproto"
|
||||||
|
proto "github.com/golang/protobuf/proto"
|
||||||
io "io"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// Reference imports to suppress errors if they are not otherwise used.
|
// Reference imports to suppress errors if they are not otherwise used.
|
||||||
@@ -36,23 +25,68 @@ var _ = math.Inf
|
|||||||
 const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
 
 type Snapshot struct {
 	Crc  uint32 `protobuf:"varint,1,opt,name=crc" json:"crc"`
 	Data []byte `protobuf:"bytes,2,opt,name=data" json:"data,omitempty"`
-	XXX_unrecognized []byte `json:"-"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
 func (m *Snapshot) Reset()         { *m = Snapshot{} }
 func (m *Snapshot) String() string { return proto.CompactTextString(m) }
 func (*Snapshot) ProtoMessage()    {}
-func (*Snapshot) Descriptor() ([]byte, []int) { return fileDescriptorSnap, []int{0} }
+func (*Snapshot) Descriptor() ([]byte, []int) {
+	return fileDescriptor_f2e3c045ebf84d00, []int{0}
+}
+func (m *Snapshot) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Snapshot) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_Snapshot.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *Snapshot) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Snapshot.Merge(m, src)
+}
+func (m *Snapshot) XXX_Size() int {
+	return m.Size()
+}
+func (m *Snapshot) XXX_DiscardUnknown() {
+	xxx_messageInfo_Snapshot.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Snapshot proto.InternalMessageInfo
+
 func init() {
 	proto.RegisterType((*Snapshot)(nil), "snappb.snapshot")
 }
+
+func init() { proto.RegisterFile("snap.proto", fileDescriptor_f2e3c045ebf84d00) }
+
+var fileDescriptor_f2e3c045ebf84d00 = []byte{
+	// 126 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2a, 0xce, 0x4b, 0x2c,
+	0xd0, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x03, 0xb1, 0x0b, 0x92, 0xa4, 0x44, 0xd2, 0xf3,
+	0xd3, 0xf3, 0xc1, 0x42, 0xfa, 0x20, 0x16, 0x44, 0x56, 0xc9, 0x8c, 0x8b, 0x03, 0x24, 0x5f, 0x9c,
+	0x91, 0x5f, 0x22, 0x24, 0xc6, 0xc5, 0x9c, 0x5c, 0x94, 0x2c, 0xc1, 0xa8, 0xc0, 0xa8, 0xc1, 0xeb,
+	0xc4, 0x72, 0xe2, 0x9e, 0x3c, 0x43, 0x10, 0x48, 0x40, 0x48, 0x88, 0x8b, 0x25, 0x25, 0xb1, 0x24,
+	0x51, 0x82, 0x49, 0x81, 0x51, 0x83, 0x27, 0x08, 0xcc, 0x76, 0x12, 0x39, 0xf1, 0x50, 0x8e, 0xe1,
+	0xc4, 0x23, 0x39, 0xc6, 0x0b, 0x8f, 0xe4, 0x18, 0x1f, 0x3c, 0x92, 0x63, 0x9c, 0xf1, 0x58, 0x8e,
+	0x01, 0x10, 0x00, 0x00, 0xff, 0xff, 0xd8, 0x0f, 0x32, 0xb2, 0x78, 0x00, 0x00, 0x00,
+}
+
 func (m *Snapshot) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
 	dAtA = make([]byte, size)
-	n, err := m.MarshalTo(dAtA)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
 	if err != nil {
 		return nil, err
 	}
@@ -60,35 +94,47 @@ func (m *Snapshot) Marshal() (dAtA []byte, err error) {
 }
 
 func (m *Snapshot) MarshalTo(dAtA []byte) (int, error) {
-	var i int
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Snapshot) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
 	_ = i
 	var l int
 	_ = l
-	dAtA[i] = 0x8
-	i++
-	i = encodeVarintSnap(dAtA, i, uint64(m.Crc))
-	if m.Data != nil {
-		dAtA[i] = 0x12
-		i++
-		i = encodeVarintSnap(dAtA, i, uint64(len(m.Data)))
-		i += copy(dAtA[i:], m.Data)
-	}
 	if m.XXX_unrecognized != nil {
-		i += copy(dAtA[i:], m.XXX_unrecognized)
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
 	}
-	return i, nil
+	if m.Data != nil {
+		i -= len(m.Data)
+		copy(dAtA[i:], m.Data)
+		i = encodeVarintSnap(dAtA, i, uint64(len(m.Data)))
+		i--
+		dAtA[i] = 0x12
+	}
+	i = encodeVarintSnap(dAtA, i, uint64(m.Crc))
+	i--
+	dAtA[i] = 0x8
+	return len(dAtA) - i, nil
 }
 
 func encodeVarintSnap(dAtA []byte, offset int, v uint64) int {
+	offset -= sovSnap(v)
+	base := offset
 	for v >= 1<<7 {
 		dAtA[offset] = uint8(v&0x7f | 0x80)
 		v >>= 7
 		offset++
 	}
 	dAtA[offset] = uint8(v)
-	return offset + 1
+	return base
 }
 func (m *Snapshot) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	n += 1 + sovSnap(uint64(m.Crc))
@@ -103,14 +149,7 @@ func (m *Snapshot) Size() (n int) {
 }
 
 func sovSnap(x uint64) (n int) {
-	for {
-		n++
-		x >>= 7
-		if x == 0 {
-			break
-		}
-	}
-	return n
+	return (math_bits.Len64(x|1) + 6) / 7
 }
 func sozSnap(x uint64) (n int) {
 	return sovSnap(uint64((x << 1) ^ uint64((int64(x) >> 63))))
@@ -130,7 +169,7 @@ func (m *Snapshot) Unmarshal(dAtA []byte) error {
 		}
 		b := dAtA[iNdEx]
 		iNdEx++
-		wire |= (uint64(b) & 0x7F) << shift
+		wire |= uint64(b&0x7F) << shift
 		if b < 0x80 {
 			break
 		}
@@ -158,7 +197,7 @@ func (m *Snapshot) Unmarshal(dAtA []byte) error {
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			m.Crc |= (uint32(b) & 0x7F) << shift
+			m.Crc |= uint32(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -177,7 +216,7 @@ func (m *Snapshot) Unmarshal(dAtA []byte) error {
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			byteLen |= (int(b) & 0x7F) << shift
+			byteLen |= int(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -186,6 +225,9 @@ func (m *Snapshot) Unmarshal(dAtA []byte) error {
 				return ErrInvalidLengthSnap
 			}
 			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthSnap
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -203,6 +245,9 @@ func (m *Snapshot) Unmarshal(dAtA []byte) error {
 			if skippy < 0 {
 				return ErrInvalidLengthSnap
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthSnap
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -270,10 +315,13 @@ func skipSnap(dAtA []byte) (n int, err error) {
 				break
 			}
 		}
-		iNdEx += length
 		if length < 0 {
 			return 0, ErrInvalidLengthSnap
 		}
+		iNdEx += length
+		if iNdEx < 0 {
+			return 0, ErrInvalidLengthSnap
+		}
 		return iNdEx, nil
 	case 3:
 		for {
@@ -302,6 +350,9 @@ func skipSnap(dAtA []byte) (n int, err error) {
 				return 0, err
 			}
 			iNdEx = start + next
+			if iNdEx < 0 {
+				return 0, ErrInvalidLengthSnap
+			}
 		}
 		return iNdEx, nil
 	case 4:
@@ -320,17 +371,3 @@ var (
 	ErrInvalidLengthSnap = fmt.Errorf("proto: negative length found during unmarshaling")
 	ErrIntOverflowSnap   = fmt.Errorf("proto: integer overflow")
 )
-
-func init() { proto.RegisterFile("snap.proto", fileDescriptorSnap) }
-
-var fileDescriptorSnap = []byte{
-	// 126 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2a, 0xce, 0x4b, 0x2c,
-	0xd0, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x03, 0xb1, 0x0b, 0x92, 0xa4, 0x44, 0xd2, 0xf3,
-	0xd3, 0xf3, 0xc1, 0x42, 0xfa, 0x20, 0x16, 0x44, 0x56, 0xc9, 0x8c, 0x8b, 0x03, 0x24, 0x5f, 0x9c,
-	0x91, 0x5f, 0x22, 0x24, 0xc6, 0xc5, 0x9c, 0x5c, 0x94, 0x2c, 0xc1, 0xa8, 0xc0, 0xa8, 0xc1, 0xeb,
-	0xc4, 0x72, 0xe2, 0x9e, 0x3c, 0x43, 0x10, 0x48, 0x40, 0x48, 0x88, 0x8b, 0x25, 0x25, 0xb1, 0x24,
-	0x51, 0x82, 0x49, 0x81, 0x51, 0x83, 0x27, 0x08, 0xcc, 0x76, 0x12, 0x39, 0xf1, 0x50, 0x8e, 0xe1,
-	0xc4, 0x23, 0x39, 0xc6, 0x0b, 0x8f, 0xe4, 0x18, 0x1f, 0x3c, 0x92, 0x63, 0x9c, 0xf1, 0x58, 0x8e,
-	0x01, 0x10, 0x00, 0x00, 0xff, 0xff, 0xd8, 0x0f, 0x32, 0xb2, 0x78, 0x00, 0x00, 0x00,
-}
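The regenerated snappb code above follows the newer gogo/protobuf style: varint sizes come from math/bits.Len64 instead of a shift loop, and MarshalToSizedBuffer writes fields back to front into a buffer sized up front, so no intermediate allocation is needed. Below is a minimal, self-contained sketch of that pattern; the helper names sov, encodeVarint, and marshalSnapshot are illustrative, not part of the vendored code, and only the technique mirrors the generated functions above.

package main

import (
	"fmt"
	"math/bits"
)

// sov returns the number of bytes needed to varint-encode x,
// the same formula the generated sovSnap uses.
func sov(x uint64) int { return (bits.Len64(x|1) + 6) / 7 }

// encodeVarint writes x ending just before offset and returns the new
// start position, mirroring the regenerated encodeVarintSnap.
func encodeVarint(dAtA []byte, offset int, x uint64) int {
	offset -= sov(x)
	base := offset
	for x >= 1<<7 {
		dAtA[offset] = uint8(x&0x7f | 0x80)
		x >>= 7
		offset++
	}
	dAtA[offset] = uint8(x)
	return base
}

// marshalSnapshot lays out field 2 (bytes) and then field 1 (varint) from
// the end of a pre-sized buffer, as MarshalToSizedBuffer does.
func marshalSnapshot(crc uint32, data []byte) []byte {
	size := 1 + sov(uint64(crc)) + 1 + sov(uint64(len(data))) + len(data)
	dAtA := make([]byte, size)
	i := len(dAtA)
	i -= len(data)
	copy(dAtA[i:], data)
	i = encodeVarint(dAtA, i, uint64(len(data)))
	i--
	dAtA[i] = 0x12 // field 2, wire type 2 (length-delimited)
	i = encodeVarint(dAtA, i, uint64(crc))
	i--
	dAtA[i] = 0x8 // field 1, wire type 0 (varint)
	return dAtA[i:]
}

func main() {
	// Prints: 08 ac 02 12 02 68 69
	fmt.Printf("% x\n", marshalSnapshot(300, []byte("hi")))
}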
2  vendor/github.com/coreos/etcd/version/version.go  generated  vendored
@@ -26,7 +26,7 @@ import (
 var (
 	// MinClusterVersion is the min cluster version this etcd binary is compatible with.
 	MinClusterVersion = "3.0.0"
-	Version           = "3.3.13"
+	Version           = "3.3.15"
 	APIVersion        = "unknown"
 
 	// Git SHA Value will be set during build
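MinClusterVersion is the compatibility floor this binary advertises, while Version is the value this update bumps to 3.3.15. A minimal sketch of such a floor check, assuming the github.com/coreos/go-semver package is available; the compatible helper is hypothetical and not code from this commit.

package main

import (
	"fmt"

	"github.com/coreos/go-semver/semver"
)

// compatible reports whether clusterVersion is at or above the minimum
// cluster version this binary claims to support.
func compatible(minClusterVersion, clusterVersion string) (bool, error) {
	minV, err := semver.NewVersion(minClusterVersion)
	if err != nil {
		return false, err
	}
	cur, err := semver.NewVersion(clusterVersion)
	if err != nil {
		return false, err
	}
	return !cur.LessThan(*minV), nil
}

func main() {
	ok, err := compatible("3.0.0", "3.3.15")
	fmt.Println(ok, err) // true <nil>
}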
262  vendor/github.com/coreos/etcd/wal/walpb/record.pb.go  generated  vendored
@@ -1,28 +1,16 @@
 // Code generated by protoc-gen-gogo. DO NOT EDIT.
 // source: record.proto
 
-/*
-	Package walpb is a generated protocol buffer package.
-
-	It is generated from these files:
-		record.proto
-
-	It has these top-level messages:
-		Record
-		Snapshot
-*/
 package walpb
 
 import (
-	"fmt"
-
-	proto "github.com/golang/protobuf/proto"
-
+	fmt "fmt"
+	io "io"
 	math "math"
+	math_bits "math/bits"
 
 	_ "github.com/gogo/protobuf/gogoproto"
-
-	io "io"
+	proto "github.com/golang/protobuf/proto"
 )
 
 // Reference imports to suppress errors if they are not otherwise used.
@@ -37,36 +25,115 @@ var _ = math.Inf
 const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
 
 type Record struct {
 	Type int64  `protobuf:"varint,1,opt,name=type" json:"type"`
 	Crc  uint32 `protobuf:"varint,2,opt,name=crc" json:"crc"`
 	Data []byte `protobuf:"bytes,3,opt,name=data" json:"data,omitempty"`
-	XXX_unrecognized []byte `json:"-"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
 func (m *Record) Reset()         { *m = Record{} }
 func (m *Record) String() string { return proto.CompactTextString(m) }
 func (*Record) ProtoMessage()    {}
-func (*Record) Descriptor() ([]byte, []int) { return fileDescriptorRecord, []int{0} }
+func (*Record) Descriptor() ([]byte, []int) {
+	return fileDescriptor_bf94fd919e302a1d, []int{0}
+}
+func (m *Record) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Record) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_Record.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *Record) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Record.Merge(m, src)
+}
+func (m *Record) XXX_Size() int {
+	return m.Size()
+}
+func (m *Record) XXX_DiscardUnknown() {
+	xxx_messageInfo_Record.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Record proto.InternalMessageInfo
+
 type Snapshot struct {
 	Index uint64 `protobuf:"varint,1,opt,name=index" json:"index"`
 	Term  uint64 `protobuf:"varint,2,opt,name=term" json:"term"`
-	XXX_unrecognized []byte `json:"-"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
 func (m *Snapshot) Reset()         { *m = Snapshot{} }
 func (m *Snapshot) String() string { return proto.CompactTextString(m) }
 func (*Snapshot) ProtoMessage()    {}
-func (*Snapshot) Descriptor() ([]byte, []int) { return fileDescriptorRecord, []int{1} }
+func (*Snapshot) Descriptor() ([]byte, []int) {
+	return fileDescriptor_bf94fd919e302a1d, []int{1}
+}
+func (m *Snapshot) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Snapshot) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_Snapshot.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *Snapshot) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Snapshot.Merge(m, src)
+}
+func (m *Snapshot) XXX_Size() int {
+	return m.Size()
+}
+func (m *Snapshot) XXX_DiscardUnknown() {
+	xxx_messageInfo_Snapshot.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Snapshot proto.InternalMessageInfo
+
 func init() {
 	proto.RegisterType((*Record)(nil), "walpb.Record")
 	proto.RegisterType((*Snapshot)(nil), "walpb.Snapshot")
 }
+
+func init() { proto.RegisterFile("record.proto", fileDescriptor_bf94fd919e302a1d) }
+
+var fileDescriptor_bf94fd919e302a1d = []byte{
+	// 186 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0x4a, 0x4d, 0xce,
+	0x2f, 0x4a, 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x2d, 0x4f, 0xcc, 0x29, 0x48, 0x92,
+	0x12, 0x49, 0xcf, 0x4f, 0xcf, 0x07, 0x8b, 0xe8, 0x83, 0x58, 0x10, 0x49, 0x25, 0x3f, 0x2e, 0xb6,
+	0x20, 0xb0, 0x62, 0x21, 0x09, 0x2e, 0x96, 0x92, 0xca, 0x82, 0x54, 0x09, 0x46, 0x05, 0x46, 0x0d,
+	0x66, 0x27, 0x96, 0x13, 0xf7, 0xe4, 0x19, 0x82, 0xc0, 0x22, 0x42, 0x62, 0x5c, 0xcc, 0xc9, 0x45,
+	0xc9, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0xbc, 0x50, 0x09, 0x90, 0x80, 0x90, 0x10, 0x17, 0x4b, 0x4a,
+	0x62, 0x49, 0xa2, 0x04, 0xb3, 0x02, 0xa3, 0x06, 0x4f, 0x10, 0x98, 0xad, 0xe4, 0xc0, 0xc5, 0x11,
+	0x9c, 0x97, 0x58, 0x50, 0x9c, 0x91, 0x5f, 0x22, 0x24, 0xc5, 0xc5, 0x9a, 0x99, 0x97, 0x92, 0x5a,
+	0x01, 0x36, 0x92, 0x05, 0xaa, 0x13, 0x22, 0x04, 0xb6, 0x2d, 0xb5, 0x28, 0x17, 0x6c, 0x28, 0x0b,
+	0xdc, 0xb6, 0xd4, 0xa2, 0x5c, 0x27, 0x91, 0x13, 0x0f, 0xe5, 0x18, 0x4e, 0x3c, 0x92, 0x63, 0xbc,
+	0xf0, 0x48, 0x8e, 0xf1, 0xc1, 0x23, 0x39, 0xc6, 0x19, 0x8f, 0xe5, 0x18, 0x00, 0x01, 0x00, 0x00,
+	0xff, 0xff, 0x7f, 0x5e, 0x5c, 0x46, 0xd3, 0x00, 0x00, 0x00,
+}
+
 func (m *Record) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
 	dAtA = make([]byte, size)
-	n, err := m.MarshalTo(dAtA)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
 	if err != nil {
 		return nil, err
 	}
@@ -74,32 +141,39 @@ func (m *Record) Marshal() (dAtA []byte, err error) {
 }
 
 func (m *Record) MarshalTo(dAtA []byte) (int, error) {
-	var i int
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Record) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
 	_ = i
 	var l int
 	_ = l
-	dAtA[i] = 0x8
-	i++
-	i = encodeVarintRecord(dAtA, i, uint64(m.Type))
-	dAtA[i] = 0x10
-	i++
-	i = encodeVarintRecord(dAtA, i, uint64(m.Crc))
-	if m.Data != nil {
-		dAtA[i] = 0x1a
-		i++
-		i = encodeVarintRecord(dAtA, i, uint64(len(m.Data)))
-		i += copy(dAtA[i:], m.Data)
-	}
 	if m.XXX_unrecognized != nil {
-		i += copy(dAtA[i:], m.XXX_unrecognized)
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
 	}
-	return i, nil
+	if m.Data != nil {
+		i -= len(m.Data)
+		copy(dAtA[i:], m.Data)
+		i = encodeVarintRecord(dAtA, i, uint64(len(m.Data)))
+		i--
+		dAtA[i] = 0x1a
+	}
+	i = encodeVarintRecord(dAtA, i, uint64(m.Crc))
+	i--
+	dAtA[i] = 0x10
+	i = encodeVarintRecord(dAtA, i, uint64(m.Type))
+	i--
+	dAtA[i] = 0x8
+	return len(dAtA) - i, nil
 }
 
 func (m *Snapshot) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
 	dAtA = make([]byte, size)
-	n, err := m.MarshalTo(dAtA)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
 	if err != nil {
 		return nil, err
 	}
@@ -107,32 +181,43 @@ func (m *Snapshot) Marshal() (dAtA []byte, err error) {
 }
 
 func (m *Snapshot) MarshalTo(dAtA []byte) (int, error) {
-	var i int
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Snapshot) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
 	_ = i
 	var l int
 	_ = l
-	dAtA[i] = 0x8
-	i++
-	i = encodeVarintRecord(dAtA, i, uint64(m.Index))
-	dAtA[i] = 0x10
-	i++
-	i = encodeVarintRecord(dAtA, i, uint64(m.Term))
 	if m.XXX_unrecognized != nil {
-		i += copy(dAtA[i:], m.XXX_unrecognized)
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
 	}
-	return i, nil
+	i = encodeVarintRecord(dAtA, i, uint64(m.Term))
+	i--
+	dAtA[i] = 0x10
+	i = encodeVarintRecord(dAtA, i, uint64(m.Index))
+	i--
+	dAtA[i] = 0x8
+	return len(dAtA) - i, nil
 }
 
 func encodeVarintRecord(dAtA []byte, offset int, v uint64) int {
+	offset -= sovRecord(v)
+	base := offset
 	for v >= 1<<7 {
 		dAtA[offset] = uint8(v&0x7f | 0x80)
 		v >>= 7
 		offset++
 	}
 	dAtA[offset] = uint8(v)
-	return offset + 1
+	return base
 }
 func (m *Record) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	n += 1 + sovRecord(uint64(m.Type))
@@ -148,6 +233,9 @@ func (m *Record) Size() (n int) {
 }
 
 func (m *Snapshot) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	n += 1 + sovRecord(uint64(m.Index))
@@ -159,14 +247,7 @@ func (m *Snapshot) Size() (n int) {
 }
 
 func sovRecord(x uint64) (n int) {
-	for {
-		n++
-		x >>= 7
-		if x == 0 {
-			break
-		}
-	}
-	return n
+	return (math_bits.Len64(x|1) + 6) / 7
 }
 func sozRecord(x uint64) (n int) {
 	return sovRecord(uint64((x << 1) ^ uint64((int64(x) >> 63))))
@@ -186,7 +267,7 @@ func (m *Record) Unmarshal(dAtA []byte) error {
 		}
 		b := dAtA[iNdEx]
 		iNdEx++
-		wire |= (uint64(b) & 0x7F) << shift
+		wire |= uint64(b&0x7F) << shift
 		if b < 0x80 {
 			break
 		}
@@ -214,7 +295,7 @@ func (m *Record) Unmarshal(dAtA []byte) error {
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			m.Type |= (int64(b) & 0x7F) << shift
+			m.Type |= int64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -233,7 +314,7 @@ func (m *Record) Unmarshal(dAtA []byte) error {
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			m.Crc |= (uint32(b) & 0x7F) << shift
+			m.Crc |= uint32(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -252,7 +333,7 @@ func (m *Record) Unmarshal(dAtA []byte) error {
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			byteLen |= (int(b) & 0x7F) << shift
+			byteLen |= int(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -261,6 +342,9 @@ func (m *Record) Unmarshal(dAtA []byte) error {
 				return ErrInvalidLengthRecord
 			}
 			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRecord
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -278,6 +362,9 @@ func (m *Record) Unmarshal(dAtA []byte) error {
 			if skippy < 0 {
 				return ErrInvalidLengthRecord
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRecord
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -306,7 +393,7 @@ func (m *Snapshot) Unmarshal(dAtA []byte) error {
 		}
 		b := dAtA[iNdEx]
 		iNdEx++
-		wire |= (uint64(b) & 0x7F) << shift
+		wire |= uint64(b&0x7F) << shift
 		if b < 0x80 {
 			break
 		}
@@ -334,7 +421,7 @@ func (m *Snapshot) Unmarshal(dAtA []byte) error {
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			m.Index |= (uint64(b) & 0x7F) << shift
+			m.Index |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -353,7 +440,7 @@ func (m *Snapshot) Unmarshal(dAtA []byte) error {
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			m.Term |= (uint64(b) & 0x7F) << shift
+			m.Term |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -367,6 +454,9 @@ func (m *Snapshot) Unmarshal(dAtA []byte) error {
 			if skippy < 0 {
 				return ErrInvalidLengthRecord
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRecord
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -434,10 +524,13 @@ func skipRecord(dAtA []byte) (n int, err error) {
 				break
 			}
 		}
-		iNdEx += length
 		if length < 0 {
 			return 0, ErrInvalidLengthRecord
 		}
+		iNdEx += length
+		if iNdEx < 0 {
+			return 0, ErrInvalidLengthRecord
+		}
 		return iNdEx, nil
 	case 3:
 		for {
@@ -466,6 +559,9 @@ func skipRecord(dAtA []byte) (n int, err error) {
 				return 0, err
 			}
 			iNdEx = start + next
+			if iNdEx < 0 {
+				return 0, ErrInvalidLengthRecord
+			}
 		}
 		return iNdEx, nil
 	case 4:
@@ -484,21 +580,3 @@ var (
 	ErrInvalidLengthRecord = fmt.Errorf("proto: negative length found during unmarshaling")
 	ErrIntOverflowRecord   = fmt.Errorf("proto: integer overflow")
 )
-
-func init() { proto.RegisterFile("record.proto", fileDescriptorRecord) }
-
-var fileDescriptorRecord = []byte{
-	// 186 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0x4a, 0x4d, 0xce,
-	0x2f, 0x4a, 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x2d, 0x4f, 0xcc, 0x29, 0x48, 0x92,
-	0x12, 0x49, 0xcf, 0x4f, 0xcf, 0x07, 0x8b, 0xe8, 0x83, 0x58, 0x10, 0x49, 0x25, 0x3f, 0x2e, 0xb6,
-	0x20, 0xb0, 0x62, 0x21, 0x09, 0x2e, 0x96, 0x92, 0xca, 0x82, 0x54, 0x09, 0x46, 0x05, 0x46, 0x0d,
-	0x66, 0x27, 0x96, 0x13, 0xf7, 0xe4, 0x19, 0x82, 0xc0, 0x22, 0x42, 0x62, 0x5c, 0xcc, 0xc9, 0x45,
-	0xc9, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0xbc, 0x50, 0x09, 0x90, 0x80, 0x90, 0x10, 0x17, 0x4b, 0x4a,
-	0x62, 0x49, 0xa2, 0x04, 0xb3, 0x02, 0xa3, 0x06, 0x4f, 0x10, 0x98, 0xad, 0xe4, 0xc0, 0xc5, 0x11,
-	0x9c, 0x97, 0x58, 0x50, 0x9c, 0x91, 0x5f, 0x22, 0x24, 0xc5, 0xc5, 0x9a, 0x99, 0x97, 0x92, 0x5a,
-	0x01, 0x36, 0x92, 0x05, 0xaa, 0x13, 0x22, 0x04, 0xb6, 0x2d, 0xb5, 0x28, 0x17, 0x6c, 0x28, 0x0b,
-	0xdc, 0xb6, 0xd4, 0xa2, 0x5c, 0x27, 0x91, 0x13, 0x0f, 0xe5, 0x18, 0x4e, 0x3c, 0x92, 0x63, 0xbc,
-	0xf0, 0x48, 0x8e, 0xf1, 0xc1, 0x23, 0x39, 0xc6, 0x19, 0x8f, 0xe5, 0x18, 0x00, 0x01, 0x00, 0x00,
-	0xff, 0xff, 0x7f, 0x5e, 0x5c, 0x46, 0xd3, 0x00, 0x00, 0x00,
-}
30  vendor/google.golang.org/genproto/googleapis/api/annotations/BUILD  generated  vendored  Normal file
@@ -0,0 +1,30 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+    name = "go_default_library",
+    srcs = [
+        "annotations.pb.go",
+        "http.pb.go",
+    ],
+    importmap = "k8s.io/kubernetes/vendor/google.golang.org/genproto/googleapis/api/annotations",
+    importpath = "google.golang.org/genproto/googleapis/api/annotations",
+    visibility = ["//visibility:public"],
+    deps = [
+        "//vendor/github.com/golang/protobuf/proto:go_default_library",
+        "//vendor/github.com/golang/protobuf/protoc-gen-go/descriptor:go_default_library",
+    ],
+)
+
+filegroup(
+    name = "package-srcs",
+    srcs = glob(["**"]),
+    tags = ["automanaged"],
+    visibility = ["//visibility:private"],
+)
+
+filegroup(
+    name = "all-srcs",
+    srcs = [":package-srcs"],
+    tags = ["automanaged"],
+    visibility = ["//visibility:public"],
+)
64  vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go  generated  vendored  Normal file
@@ -0,0 +1,64 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/api/annotations.proto
+
+/*
+Package annotations is a generated protocol buffer package.
+
+It is generated from these files:
+	google/api/annotations.proto
+	google/api/http.proto
+
+It has these top-level messages:
+	Http
+	HttpRule
+	CustomHttpPattern
+*/
+package annotations
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import google_protobuf "github.com/golang/protobuf/protoc-gen-go/descriptor"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+var E_Http = &proto.ExtensionDesc{
+	ExtendedType:  (*google_protobuf.MethodOptions)(nil),
+	ExtensionType: (*HttpRule)(nil),
+	Field:         72295728,
+	Name:          "google.api.http",
+	Tag:           "bytes,72295728,opt,name=http",
+	Filename:      "google/api/annotations.proto",
+}
+
+func init() {
+	proto.RegisterExtension(E_Http)
+}
+
+func init() { proto.RegisterFile("google/api/annotations.proto", fileDescriptor0) }
+
+var fileDescriptor0 = []byte{
+	// 208 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x49, 0xcf, 0xcf, 0x4f,
+	0xcf, 0x49, 0xd5, 0x4f, 0x2c, 0xc8, 0xd4, 0x4f, 0xcc, 0xcb, 0xcb, 0x2f, 0x49, 0x2c, 0xc9, 0xcc,
+	0xcf, 0x2b, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x82, 0xc8, 0xea, 0x25, 0x16, 0x64,
+	0x4a, 0x89, 0x22, 0xa9, 0xcc, 0x28, 0x29, 0x29, 0x80, 0x28, 0x91, 0x52, 0x80, 0x0a, 0x83, 0x79,
+	0x49, 0xa5, 0x69, 0xfa, 0x29, 0xa9, 0xc5, 0xc9, 0x45, 0x99, 0x05, 0x25, 0xf9, 0x45, 0x10, 0x15,
+	0x56, 0xde, 0x5c, 0x2c, 0x20, 0xf5, 0x42, 0x72, 0x7a, 0x50, 0xd3, 0x60, 0x4a, 0xf5, 0x7c, 0x53,
+	0x4b, 0x32, 0xf2, 0x53, 0xfc, 0x0b, 0xc0, 0x56, 0x4a, 0x6c, 0x38, 0xb5, 0x47, 0x49, 0x81, 0x51,
+	0x83, 0xdb, 0x48, 0x44, 0x0f, 0x61, 0xad, 0x9e, 0x47, 0x49, 0x49, 0x41, 0x50, 0x69, 0x4e, 0x6a,
+	0x10, 0xd8, 0x10, 0xa7, 0x3c, 0x2e, 0xbe, 0xe4, 0xfc, 0x5c, 0x24, 0x05, 0x4e, 0x02, 0x8e, 0x08,
+	0x67, 0x07, 0x80, 0x4c, 0x0e, 0x60, 0x8c, 0x72, 0x84, 0xca, 0xa7, 0xe7, 0xe7, 0x24, 0xe6, 0xa5,
+	0xeb, 0xe5, 0x17, 0xa5, 0xeb, 0xa7, 0xa7, 0xe6, 0x81, 0xed, 0xd5, 0x87, 0x48, 0x25, 0x16, 0x64,
+	0x16, 0xa3, 0x7b, 0xda, 0x1a, 0x89, 0xbd, 0x88, 0x89, 0xc5, 0xdd, 0x31, 0xc0, 0x33, 0x89, 0x0d,
+	0xac, 0xc9, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0xe3, 0x29, 0x19, 0x62, 0x28, 0x01, 0x00, 0x00,
+}
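The E_Http extension descriptor above is how the google.api.http rule is attached to a gRPC method's options. Below is a hedged sketch of attaching and reading that extension with the v1.3-era github.com/golang/protobuf API these vendored files target; the method selector and path are made-up examples, not values from this commit.

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	descriptor "github.com/golang/protobuf/protoc-gen-go/descriptor"
	annotations "google.golang.org/genproto/googleapis/api/annotations"
)

func main() {
	opts := &descriptor.MethodOptions{}
	rule := &annotations.HttpRule{
		Selector: "example.Messaging.GetMessage",
		Pattern:  &annotations.HttpRule_Get{Get: "/v1/messages/{message_id}"},
	}

	// Attach the google.api.http extension to the method options...
	if err := proto.SetExtension(opts, annotations.E_Http, rule); err != nil {
		panic(err)
	}

	// ...and read it back, as a gateway or documentation generator would.
	got, err := proto.GetExtension(opts, annotations.E_Http)
	if err != nil {
		panic(err)
	}
	fmt.Println(got.(*annotations.HttpRule).GetGet()) // /v1/messages/{message_id}
}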
566  vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go  generated  vendored  Normal file
@@ -0,0 +1,566 @@
|
|||||||
|
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||||
|
// source: google/api/http.proto
|
||||||
|
|
||||||
|
package annotations
|
||||||
|
|
||||||
|
import proto "github.com/golang/protobuf/proto"
|
||||||
|
import fmt "fmt"
|
||||||
|
import math "math"
|
||||||
|
|
||||||
|
// Reference imports to suppress errors if they are not otherwise used.
|
||||||
|
var _ = proto.Marshal
|
||||||
|
var _ = fmt.Errorf
|
||||||
|
var _ = math.Inf
|
||||||
|
|
||||||
|
// Defines the HTTP configuration for a service. It contains a list of
|
||||||
|
// [HttpRule][google.api.HttpRule], each specifying the mapping of an RPC method
|
||||||
|
// to one or more HTTP REST API methods.
|
||||||
|
type Http struct {
|
||||||
|
// A list of HTTP configuration rules that apply to individual API methods.
|
||||||
|
//
|
||||||
|
// **NOTE:** All service configuration rules follow "last one wins" order.
|
||||||
|
Rules []*HttpRule `protobuf:"bytes,1,rep,name=rules" json:"rules,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Http) Reset() { *m = Http{} }
|
||||||
|
func (m *Http) String() string { return proto.CompactTextString(m) }
|
||||||
|
func (*Http) ProtoMessage() {}
|
||||||
|
func (*Http) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} }
|
||||||
|
|
||||||
|
func (m *Http) GetRules() []*HttpRule {
|
||||||
|
if m != nil {
|
||||||
|
return m.Rules
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// `HttpRule` defines the mapping of an RPC method to one or more HTTP
|
||||||
|
// REST APIs. The mapping determines what portions of the request
|
||||||
|
// message are populated from the path, query parameters, or body of
|
||||||
|
// the HTTP request. The mapping is typically specified as an
|
||||||
|
// `google.api.http` annotation, see "google/api/annotations.proto"
|
||||||
|
// for details.
|
||||||
|
//
|
||||||
|
// The mapping consists of a field specifying the path template and
|
||||||
|
// method kind. The path template can refer to fields in the request
|
||||||
|
// message, as in the example below which describes a REST GET
|
||||||
|
// operation on a resource collection of messages:
|
||||||
|
//
|
||||||
|
//
|
||||||
|
// service Messaging {
|
||||||
|
// rpc GetMessage(GetMessageRequest) returns (Message) {
|
||||||
|
// option (google.api.http).get = "/v1/messages/{message_id}/{sub.subfield}";
|
||||||
|
// }
|
||||||
|
// }
|
||||||
|
// message GetMessageRequest {
|
||||||
|
// message SubMessage {
|
||||||
|
// string subfield = 1;
|
||||||
|
// }
|
||||||
|
// string message_id = 1; // mapped to the URL
|
||||||
|
// SubMessage sub = 2; // `sub.subfield` is url-mapped
|
||||||
|
// }
|
||||||
|
// message Message {
|
||||||
|
// string text = 1; // content of the resource
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// The same http annotation can alternatively be expressed inside the
|
||||||
|
// `GRPC API Configuration` YAML file.
|
||||||
|
//
|
||||||
|
// http:
|
||||||
|
// rules:
|
||||||
|
// - selector: <proto_package_name>.Messaging.GetMessage
|
||||||
|
// get: /v1/messages/{message_id}/{sub.subfield}
|
||||||
|
//
|
||||||
|
// This definition enables an automatic, bidrectional mapping of HTTP
|
||||||
|
// JSON to RPC. Example:
|
||||||
|
//
|
||||||
|
// HTTP | RPC
|
||||||
|
// -----|-----
|
||||||
|
// `GET /v1/messages/123456/foo` | `GetMessage(message_id: "123456" sub: SubMessage(subfield: "foo"))`
|
||||||
|
//
|
||||||
|
// In general, not only fields but also field paths can be referenced
|
||||||
|
// from a path pattern. Fields mapped to the path pattern cannot be
|
||||||
|
// repeated and must have a primitive (non-message) type.
|
||||||
|
//
|
||||||
|
// Any fields in the request message which are not bound by the path
|
||||||
|
// pattern automatically become (optional) HTTP query
|
||||||
|
// parameters. Assume the following definition of the request message:
|
||||||
|
//
|
||||||
|
//
|
||||||
|
// message GetMessageRequest {
|
||||||
|
// message SubMessage {
|
||||||
|
// string subfield = 1;
|
||||||
|
// }
|
||||||
|
// string message_id = 1; // mapped to the URL
|
||||||
|
// int64 revision = 2; // becomes a parameter
|
||||||
|
// SubMessage sub = 3; // `sub.subfield` becomes a parameter
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
//
|
||||||
|
// This enables a HTTP JSON to RPC mapping as below:
|
||||||
|
//
|
||||||
|
// HTTP | RPC
|
||||||
|
// -----|-----
|
||||||
|
// `GET /v1/messages/123456?revision=2&sub.subfield=foo` | `GetMessage(message_id: "123456" revision: 2 sub: SubMessage(subfield: "foo"))`
|
||||||
|
//
|
||||||
|
// Note that fields which are mapped to HTTP parameters must have a
|
||||||
|
// primitive type or a repeated primitive type. Message types are not
|
||||||
|
// allowed. In the case of a repeated type, the parameter can be
|
||||||
|
// repeated in the URL, as in `...?param=A¶m=B`.
|
||||||
|
//
|
||||||
|
// For HTTP method kinds which allow a request body, the `body` field
|
||||||
|
// specifies the mapping. Consider a REST update method on the
|
||||||
|
// message resource collection:
|
||||||
|
//
|
||||||
|
//
|
||||||
|
// service Messaging {
|
||||||
|
// rpc UpdateMessage(UpdateMessageRequest) returns (Message) {
|
||||||
|
// option (google.api.http) = {
|
||||||
|
// put: "/v1/messages/{message_id}"
|
||||||
|
// body: "message"
|
||||||
|
// };
|
||||||
|
// }
|
||||||
|
// }
|
||||||
|
// message UpdateMessageRequest {
|
||||||
|
// string message_id = 1; // mapped to the URL
|
||||||
|
// Message message = 2; // mapped to the body
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
//
|
||||||
|
// The following HTTP JSON to RPC mapping is enabled, where the
|
||||||
|
// representation of the JSON in the request body is determined by
|
||||||
|
// protos JSON encoding:
|
||||||
|
//
|
||||||
|
// HTTP | RPC
|
||||||
|
// -----|-----
|
||||||
|
// `PUT /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: "123456" message { text: "Hi!" })`
|
||||||
|
//
|
||||||
|
// The special name `*` can be used in the body mapping to define that
|
||||||
|
// every field not bound by the path template should be mapped to the
|
||||||
|
// request body. This enables the following alternative definition of
|
||||||
|
// the update method:
|
||||||
|
//
|
||||||
|
// service Messaging {
|
||||||
|
// rpc UpdateMessage(Message) returns (Message) {
|
||||||
|
// option (google.api.http) = {
|
||||||
|
// put: "/v1/messages/{message_id}"
|
||||||
|
// body: "*"
|
||||||
|
// };
|
||||||
|
// }
|
||||||
|
// }
|
||||||
|
// message Message {
|
||||||
|
// string message_id = 1;
|
||||||
|
// string text = 2;
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
//
|
||||||
|
// The following HTTP JSON to RPC mapping is enabled:
|
||||||
|
//
|
||||||
|
// HTTP | RPC
|
||||||
|
// -----|-----
|
||||||
|
// `PUT /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: "123456" text: "Hi!")`
|
||||||
|
//
|
||||||
|
// Note that when using `*` in the body mapping, it is not possible to
|
||||||
|
// have HTTP parameters, as all fields not bound by the path end in
|
||||||
|
// the body. This makes this option more rarely used in practice of
|
||||||
|
// defining REST APIs. The common usage of `*` is in custom methods
|
||||||
|
// which don't use the URL at all for transferring data.
|
||||||
|
//
|
||||||
|
// It is possible to define multiple HTTP methods for one RPC by using
|
||||||
|
// the `additional_bindings` option. Example:
|
||||||
|
//
|
||||||
|
// service Messaging {
|
||||||
|
// rpc GetMessage(GetMessageRequest) returns (Message) {
|
||||||
|
// option (google.api.http) = {
|
||||||
|
// get: "/v1/messages/{message_id}"
|
||||||
|
// additional_bindings {
|
||||||
|
// get: "/v1/users/{user_id}/messages/{message_id}"
|
||||||
|
// }
|
||||||
|
// };
|
||||||
|
// }
|
||||||
|
// }
|
||||||
|
// message GetMessageRequest {
|
||||||
|
// string message_id = 1;
|
||||||
|
// string user_id = 2;
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
//
|
||||||
|
// This enables the following two alternative HTTP JSON to RPC
|
||||||
|
// mappings:
|
||||||
|
//
|
||||||
|
// HTTP | RPC
|
||||||
|
// -----|-----
|
||||||
|
// `GET /v1/messages/123456` | `GetMessage(message_id: "123456")`
|
||||||
|
// `GET /v1/users/me/messages/123456` | `GetMessage(user_id: "me" message_id: "123456")`
|
||||||
|
//
|
||||||
|
// # Rules for HTTP mapping
|
||||||
|
//
|
||||||
|
// The rules for mapping HTTP path, query parameters, and body fields
|
||||||
|
// to the request message are as follows:
|
||||||
|
//
|
||||||
|
// 1. The `body` field specifies either `*` or a field path, or is
|
||||||
|
// omitted. If omitted, it assumes there is no HTTP body.
|
||||||
|
// 2. Leaf fields (recursive expansion of nested messages in the
|
||||||
|
// request) can be classified into three types:
|
||||||
|
// (a) Matched in the URL template.
|
||||||
|
// (b) Covered by body (if body is `*`, everything except (a) fields;
|
||||||
|
// else everything under the body field)
|
||||||
|
// (c) All other fields.
|
||||||
|
// 3. URL query parameters found in the HTTP request are mapped to (c) fields.
|
||||||
|
// 4. Any body sent with an HTTP request can contain only (b) fields.
|
||||||
|
//
|
||||||
|
// The syntax of the path template is as follows:
|
||||||
|
//
|
||||||
|
// Template = "/" Segments [ Verb ] ;
|
||||||
|
// Segments = Segment { "/" Segment } ;
|
||||||
|
// Segment = "*" | "**" | LITERAL | Variable ;
|
||||||
|
// Variable = "{" FieldPath [ "=" Segments ] "}" ;
|
||||||
|
// FieldPath = IDENT { "." IDENT } ;
|
||||||
|
// Verb = ":" LITERAL ;
|
||||||
|
//
|
||||||
|
// The syntax `*` matches a single path segment. It follows the semantics of
|
||||||
|
// [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.2 Simple String
|
||||||
|
// Expansion.
|
||||||
|
//
|
||||||
|
// The syntax `**` matches zero or more path segments. It follows the semantics
|
||||||
|
// of [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.3 Reserved
|
||||||
|
// Expansion. NOTE: it must be the last segment in the path except the Verb.
|
||||||
|
//
|
||||||
|
// The syntax `LITERAL` matches literal text in the URL path.
|
||||||
|
//
|
||||||
|
// The syntax `Variable` matches the entire path as specified by its template;
|
||||||
|
// this nested template must not contain further variables. If a variable
|
||||||
|
// matches a single path segment, its template may be omitted, e.g. `{var}`
|
||||||
|
// is equivalent to `{var=*}`.
|
||||||
|
//
|
||||||
|
// NOTE: the field paths in variables and in the `body` must not refer to
|
||||||
|
// repeated fields or map fields.
|
||||||
|
//
|
||||||
|
// Use CustomHttpPattern to specify any HTTP method that is not included in the
|
||||||
|
// `pattern` field, such as HEAD, or "*" to leave the HTTP method unspecified for
|
||||||
|
// a given URL path rule. The wild-card rule is useful for services that provide
|
||||||
|
// content to Web (HTML) clients.
|
||||||
|
type HttpRule struct {
|
||||||
|
// Selects methods to which this rule applies.
|
||||||
|
//
|
||||||
|
// Refer to [selector][google.api.DocumentationRule.selector] for syntax details.
|
||||||
|
Selector string `protobuf:"bytes,1,opt,name=selector" json:"selector,omitempty"`
|
||||||
|
// Determines the URL pattern is matched by this rules. This pattern can be
|
||||||
|
// used with any of the {get|put|post|delete|patch} methods. A custom method
|
||||||
|
// can be defined using the 'custom' field.
|
||||||
|
//
|
||||||
|
// Types that are valid to be assigned to Pattern:
|
||||||
|
// *HttpRule_Get
|
||||||
|
// *HttpRule_Put
|
||||||
|
// *HttpRule_Post
|
||||||
|
// *HttpRule_Delete
|
||||||
|
// *HttpRule_Patch
|
||||||
|
// *HttpRule_Custom
|
||||||
|
Pattern isHttpRule_Pattern `protobuf_oneof:"pattern"`
|
||||||
|
// The name of the request field whose value is mapped to the HTTP body, or
|
||||||
|
// `*` for mapping all fields not captured by the path pattern to the HTTP
|
||||||
|
// body. NOTE: the referred field must not be a repeated field and must be
|
||||||
|
// present at the top-level of request message type.
|
||||||
|
Body string `protobuf:"bytes,7,opt,name=body" json:"body,omitempty"`
|
||||||
|
// Additional HTTP bindings for the selector. Nested bindings must
|
||||||
|
// not contain an `additional_bindings` field themselves (that is,
|
||||||
|
// the nesting may only be one level deep).
|
||||||
|
AdditionalBindings []*HttpRule `protobuf:"bytes,11,rep,name=additional_bindings,json=additionalBindings" json:"additional_bindings,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *HttpRule) Reset() { *m = HttpRule{} }
|
||||||
|
func (m *HttpRule) String() string { return proto.CompactTextString(m) }
|
||||||
|
func (*HttpRule) ProtoMessage() {}
|
||||||
|
func (*HttpRule) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{1} }
|
||||||
|
|
||||||
|
type isHttpRule_Pattern interface {
|
||||||
|
isHttpRule_Pattern()
|
||||||
|
}
|
||||||
|
|
||||||
|
type HttpRule_Get struct {
|
||||||
|
Get string `protobuf:"bytes,2,opt,name=get,oneof"`
|
||||||
|
}
|
||||||
|
type HttpRule_Put struct {
|
||||||
|
Put string `protobuf:"bytes,3,opt,name=put,oneof"`
|
||||||
|
}
|
||||||
|
type HttpRule_Post struct {
|
||||||
|
Post string `protobuf:"bytes,4,opt,name=post,oneof"`
|
||||||
|
}
|
||||||
|
type HttpRule_Delete struct {
|
||||||
|
Delete string `protobuf:"bytes,5,opt,name=delete,oneof"`
|
||||||
|
}
|
||||||
|
type HttpRule_Patch struct {
|
||||||
|
Patch string `protobuf:"bytes,6,opt,name=patch,oneof"`
|
||||||
|
}
|
||||||
|
type HttpRule_Custom struct {
|
||||||
|
Custom *CustomHttpPattern `protobuf:"bytes,8,opt,name=custom,oneof"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (*HttpRule_Get) isHttpRule_Pattern() {}
|
||||||
|
func (*HttpRule_Put) isHttpRule_Pattern() {}
|
||||||
|
func (*HttpRule_Post) isHttpRule_Pattern() {}
|
||||||
|
func (*HttpRule_Delete) isHttpRule_Pattern() {}
|
||||||
|
func (*HttpRule_Patch) isHttpRule_Pattern() {}
|
||||||
|
func (*HttpRule_Custom) isHttpRule_Pattern() {}
|
||||||
|
|
||||||
|
func (m *HttpRule) GetPattern() isHttpRule_Pattern {
|
||||||
|
if m != nil {
|
||||||
|
return m.Pattern
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *HttpRule) GetSelector() string {
|
||||||
|
if m != nil {
|
||||||
|
return m.Selector
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *HttpRule) GetGet() string {
|
||||||
|
if x, ok := m.GetPattern().(*HttpRule_Get); ok {
|
||||||
|
return x.Get
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *HttpRule) GetPut() string {
|
||||||
|
if x, ok := m.GetPattern().(*HttpRule_Put); ok {
|
||||||
|
return x.Put
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *HttpRule) GetPost() string {
|
||||||
|
if x, ok := m.GetPattern().(*HttpRule_Post); ok {
|
||||||
|
return x.Post
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *HttpRule) GetDelete() string {
|
||||||
|
if x, ok := m.GetPattern().(*HttpRule_Delete); ok {
|
||||||
|
return x.Delete
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *HttpRule) GetPatch() string {
|
||||||
|
if x, ok := m.GetPattern().(*HttpRule_Patch); ok {
|
||||||
|
return x.Patch
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *HttpRule) GetCustom() *CustomHttpPattern {
|
||||||
|
if x, ok := m.GetPattern().(*HttpRule_Custom); ok {
|
||||||
|
return x.Custom
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *HttpRule) GetBody() string {
|
||||||
|
if m != nil {
|
||||||
|
return m.Body
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *HttpRule) GetAdditionalBindings() []*HttpRule {
|
||||||
|
if m != nil {
|
||||||
|
return m.AdditionalBindings
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// XXX_OneofFuncs is for the internal use of the proto package.
|
||||||
|
func (*HttpRule) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
|
||||||
|
return _HttpRule_OneofMarshaler, _HttpRule_OneofUnmarshaler, _HttpRule_OneofSizer, []interface{}{
|
||||||
|
(*HttpRule_Get)(nil),
|
||||||
|
(*HttpRule_Put)(nil),
|
||||||
|
(*HttpRule_Post)(nil),
|
||||||
|
(*HttpRule_Delete)(nil),
|
||||||
|
(*HttpRule_Patch)(nil),
|
||||||
|
(*HttpRule_Custom)(nil),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func _HttpRule_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
|
||||||
|
m := msg.(*HttpRule)
|
||||||
|
// pattern
|
||||||
|
switch x := m.Pattern.(type) {
|
||||||
|
case *HttpRule_Get:
|
||||||
|
b.EncodeVarint(2<<3 | proto.WireBytes)
|
||||||
|
b.EncodeStringBytes(x.Get)
|
||||||
|
case *HttpRule_Put:
|
||||||
|
b.EncodeVarint(3<<3 | proto.WireBytes)
|
||||||
|
b.EncodeStringBytes(x.Put)
|
||||||
|
case *HttpRule_Post:
|
||||||
|
b.EncodeVarint(4<<3 | proto.WireBytes)
|
||||||
|
b.EncodeStringBytes(x.Post)
|
||||||
|
case *HttpRule_Delete:
|
||||||
|
b.EncodeVarint(5<<3 | proto.WireBytes)
|
||||||
|
b.EncodeStringBytes(x.Delete)
|
||||||
|
case *HttpRule_Patch:
|
||||||
|
b.EncodeVarint(6<<3 | proto.WireBytes)
|
||||||
|
b.EncodeStringBytes(x.Patch)
|
||||||
|
case *HttpRule_Custom:
|
||||||
|
b.EncodeVarint(8<<3 | proto.WireBytes)
|
||||||
|
if err := b.EncodeMessage(x.Custom); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
case nil:
|
||||||
|
default:
|
||||||
|
return fmt.Errorf("HttpRule.Pattern has unexpected type %T", x)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func _HttpRule_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
|
||||||
|
m := msg.(*HttpRule)
|
||||||
|
switch tag {
|
||||||
|
case 2: // pattern.get
|
||||||
|
if wire != proto.WireBytes {
|
||||||
|
return true, proto.ErrInternalBadWireType
|
||||||
|
}
|
||||||
|
x, err := b.DecodeStringBytes()
|
||||||
|
m.Pattern = &HttpRule_Get{x}
|
||||||
|
return true, err
|
||||||
|
case 3: // pattern.put
|
||||||
|
if wire != proto.WireBytes {
|
||||||
|
return true, proto.ErrInternalBadWireType
|
||||||
|
}
|
||||||
|
x, err := b.DecodeStringBytes()
|
||||||
|
m.Pattern = &HttpRule_Put{x}
|
||||||
|
return true, err
|
||||||
|
case 4: // pattern.post
|
||||||
|
if wire != proto.WireBytes {
|
||||||
|
return true, proto.ErrInternalBadWireType
|
||||||
|
}
|
||||||
|
x, err := b.DecodeStringBytes()
|
||||||
|
m.Pattern = &HttpRule_Post{x}
|
||||||
|
return true, err
|
||||||
|
case 5: // pattern.delete
|
||||||
|
if wire != proto.WireBytes {
|
||||||
|
return true, proto.ErrInternalBadWireType
|
||||||
|
}
|
||||||
|
x, err := b.DecodeStringBytes()
|
||||||
|
m.Pattern = &HttpRule_Delete{x}
|
||||||
|
return true, err
|
||||||
|
case 6: // pattern.patch
|
||||||
|
if wire != proto.WireBytes {
|
||||||
|
return true, proto.ErrInternalBadWireType
|
||||||
|
}
|
||||||
|
x, err := b.DecodeStringBytes()
|
||||||
|
m.Pattern = &HttpRule_Patch{x}
|
||||||
|
return true, err
|
||||||
|
case 8: // pattern.custom
|
||||||
|
if wire != proto.WireBytes {
|
||||||
|
return true, proto.ErrInternalBadWireType
|
||||||
|
}
|
||||||
|
msg := new(CustomHttpPattern)
|
||||||
|
err := b.DecodeMessage(msg)
|
||||||
|
m.Pattern = &HttpRule_Custom{msg}
|
||||||
|
return true, err
|
||||||
|
default:
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func _HttpRule_OneofSizer(msg proto.Message) (n int) {
|
||||||
|
m := msg.(*HttpRule)
|
||||||
|
// pattern
|
||||||
|
switch x := m.Pattern.(type) {
|
||||||
|
case *HttpRule_Get:
|
||||||
|
n += proto.SizeVarint(2<<3 | proto.WireBytes)
|
||||||
|
n += proto.SizeVarint(uint64(len(x.Get)))
|
||||||
|
n += len(x.Get)
|
||||||
|
case *HttpRule_Put:
|
||||||
|
n += proto.SizeVarint(3<<3 | proto.WireBytes)
|
||||||
|
n += proto.SizeVarint(uint64(len(x.Put)))
|
||||||
|
n += len(x.Put)
|
||||||
|
case *HttpRule_Post:
|
||||||
|
n += proto.SizeVarint(4<<3 | proto.WireBytes)
|
||||||
|
n += proto.SizeVarint(uint64(len(x.Post)))
|
||||||
|
n += len(x.Post)
|
||||||
|
case *HttpRule_Delete:
|
||||||
|
n += proto.SizeVarint(5<<3 | proto.WireBytes)
|
||||||
|
n += proto.SizeVarint(uint64(len(x.Delete)))
|
||||||
|
n += len(x.Delete)
|
||||||
|
case *HttpRule_Patch:
|
||||||
|
n += proto.SizeVarint(6<<3 | proto.WireBytes)
|
||||||
|
n += proto.SizeVarint(uint64(len(x.Patch)))
|
||||||
|
n += len(x.Patch)
|
||||||
|
case *HttpRule_Custom:
|
||||||
|
s := proto.Size(x.Custom)
|
||||||
|
n += proto.SizeVarint(8<<3 | proto.WireBytes)
|
||||||
|
n += proto.SizeVarint(uint64(s))
|
||||||
|
n += s
|
||||||
|
case nil:
|
||||||
|
default:
|
||||||
|
panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
// A custom pattern is used for defining custom HTTP verb.
|
||||||
|
type CustomHttpPattern struct {
|
||||||
|
// The name of this custom HTTP verb.
|
||||||
|
Kind string `protobuf:"bytes,1,opt,name=kind" json:"kind,omitempty"`
|
||||||
|
// The path matched by this custom verb.
|
||||||
|
Path string `protobuf:"bytes,2,opt,name=path" json:"path,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *CustomHttpPattern) Reset() { *m = CustomHttpPattern{} }
|
||||||
|
func (m *CustomHttpPattern) String() string { return proto.CompactTextString(m) }
|
||||||
|
func (*CustomHttpPattern) ProtoMessage() {}
|
||||||
|
func (*CustomHttpPattern) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{2} }
|
||||||
|
|
||||||
|
func (m *CustomHttpPattern) GetKind() string {
|
||||||
|
if m != nil {
|
||||||
|
return m.Kind
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *CustomHttpPattern) GetPath() string {
|
||||||
|
if m != nil {
|
||||||
|
return m.Path
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
proto.RegisterType((*Http)(nil), "google.api.Http")
|
||||||
|
proto.RegisterType((*HttpRule)(nil), "google.api.HttpRule")
|
||||||
|
proto.RegisterType((*CustomHttpPattern)(nil), "google.api.CustomHttpPattern")
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() { proto.RegisterFile("google/api/http.proto", fileDescriptor1) }
|
||||||
|
|
||||||
|
var fileDescriptor1 = []byte{
|
||||||
|
// 359 bytes of a gzipped FileDescriptorProto
|
||||||
|
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x92, 0xcf, 0x6a, 0xe3, 0x30,
|
||||||
|
0x10, 0xc6, 0xd7, 0x89, 0xe3, 0x24, 0x13, 0x58, 0x58, 0x6d, 0x76, 0x11, 0x85, 0x42, 0xc8, 0x29,
|
||||||
|
0xf4, 0x60, 0x43, 0x7a, 0xe8, 0x21, 0xa7, 0xb8, 0x94, 0xa6, 0xb7, 0xe0, 0x63, 0x2f, 0x45, 0xb1,
|
||||||
|
0x85, 0xa2, 0xd6, 0x91, 0x84, 0x3d, 0x3e, 0xf4, 0x75, 0xfa, 0x0e, 0x7d, 0xb7, 0x1e, 0x8b, 0xfe,
|
||||||
|
0xa4, 0x09, 0x14, 0x7a, 0x9b, 0xef, 0x37, 0x9f, 0x34, 0xa3, 0x19, 0xc1, 0x3f, 0xa1, 0xb5, 0xa8,
|
||||||
|
0x79, 0xc6, 0x8c, 0xcc, 0xf6, 0x88, 0x26, 0x35, 0x8d, 0x46, 0x4d, 0xc0, 0xe3, 0x94, 0x19, 0x39,
|
||||||
|
0x5f, 0x42, 0xbc, 0x41, 0x34, 0xe4, 0x0a, 0x06, 0x4d, 0x57, 0xf3, 0x96, 0x46, 0xb3, 0xfe, 0x62,
|
||||||
|
0xb2, 0x9c, 0xa6, 0x27, 0x4f, 0x6a, 0x0d, 0x45, 0x57, 0xf3, 0xc2, 0x5b, 0xe6, 0xef, 0x3d, 0x18,
|
||||||
|
0x1d, 0x19, 0xb9, 0x80, 0x51, 0xcb, 0x6b, 0x5e, 0xa2, 0x6e, 0x68, 0x34, 0x8b, 0x16, 0xe3, 0xe2,
|
||||||
|
0x4b, 0x13, 0x02, 0x7d, 0xc1, 0x91, 0xf6, 0x2c, 0xde, 0xfc, 0x2a, 0xac, 0xb0, 0xcc, 0x74, 0x48,
|
||||||
|
0xfb, 0x47, 0x66, 0x3a, 0x24, 0x53, 0x88, 0x8d, 0x6e, 0x91, 0xc6, 0x01, 0x3a, 0x45, 0x28, 0x24,
|
||||||
|
0x15, 0xaf, 0x39, 0x72, 0x3a, 0x08, 0x3c, 0x68, 0xf2, 0x1f, 0x06, 0x86, 0x61, 0xb9, 0xa7, 0x49,
|
||||||
|
0x48, 0x78, 0x49, 0x6e, 0x20, 0x29, 0xbb, 0x16, 0xf5, 0x81, 0x8e, 0x66, 0xd1, 0x62, 0xb2, 0xbc,
|
||||||
|
0x3c, 0x7f, 0xc5, 0xad, 0xcb, 0xd8, 0xbe, 0xb7, 0x0c, 0x91, 0x37, 0xca, 0x5e, 0xe8, 0xed, 0x84,
|
||||||
|
0x40, 0xbc, 0xd3, 0xd5, 0x2b, 0x1d, 0xba, 0x07, 0xb8, 0x98, 0xdc, 0xc1, 0x5f, 0x56, 0x55, 0x12,
|
||||||
|
0xa5, 0x56, 0xac, 0x7e, 0xda, 0x49, 0x55, 0x49, 0x25, 0x5a, 0x3a, 0xf9, 0x61, 0x3e, 0xe4, 0x74,
|
||||||
|
0x20, 0x0f, 0xfe, 0x7c, 0x0c, 0x43, 0xe3, 0xeb, 0xcd, 0x57, 0xf0, 0xe7, 0x5b, 0x13, 0xb6, 0xf4,
|
||||||
|
0x8b, 0x54, 0x55, 0x98, 0x9d, 0x8b, 0x2d, 0x33, 0x0c, 0xf7, 0x7e, 0x70, 0x85, 0x8b, 0xf3, 0x67,
|
||||||
|
0xf8, 0x5d, 0xea, 0xc3, 0x59, 0xd9, 0x7c, 0xec, 0xae, 0xb1, 0x1b, 0xdd, 0x46, 0x8f, 0xeb, 0x90,
|
||||||
|
0x10, 0xba, 0x66, 0x4a, 0xa4, 0xba, 0x11, 0x99, 0xe0, 0xca, 0xed, 0x3b, 0xf3, 0x29, 0x66, 0x64,
|
||||||
|
0xeb, 0x7e, 0x02, 0x53, 0x4a, 0x23, 0xb3, 0x6d, 0xb6, 0xab, 0xb3, 0xf8, 0x23, 0x8a, 0xde, 0x7a,
|
||||||
|
0xf1, 0xfd, 0x7a, 0xfb, 0xb0, 0x4b, 0xdc, 0xb9, 0xeb, 0xcf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x68,
|
||||||
|
0x15, 0x60, 0x5b, 0x40, 0x02, 0x00, 0x00,
|
||||||
|
}
|
||||||
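The HttpRule documentation above describes how a single RPC can be bound to several HTTP paths via additional_bindings, with exactly one pattern variant set per rule. A small sketch using only the generated types and getters from this file; the service selector and paths are illustrative.

package main

import (
	"fmt"

	annotations "google.golang.org/genproto/googleapis/api/annotations"
)

func main() {
	// One RPC exposed on two HTTP GET paths, mirroring the
	// additional_bindings example in the HttpRule comments above.
	rule := &annotations.HttpRule{
		Selector: "example.Messaging.GetMessage",
		Pattern:  &annotations.HttpRule_Get{Get: "/v1/messages/{message_id}"},
		AdditionalBindings: []*annotations.HttpRule{
			{Pattern: &annotations.HttpRule_Get{Get: "/v1/users/{user_id}/messages/{message_id}"}},
		},
	}

	// The generated oneof getters return the zero value when another
	// pattern variant is set, so they are safe to probe in any order.
	fmt.Println(rule.GetSelector(), rule.GetGet(), rule.GetPut() == "")
	for _, b := range rule.GetAdditionalBindings() {
		fmt.Println(b.GetGet())
	}
}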