Level sets dependency graph to consume etcd 3.1.5
Godeps/Godeps.json · 426 changed lines (generated)
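The manifest records each vendored package as an object with "ImportPath", an optional "Comment" (version tag), and "Rev" (pinned commit), as the hunks below show. As an illustration only — not part of this commit — a minimal Go sketch that lists the etcd packages pinned in a Godeps.json of this layout:

package main

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"strings"
)

// dep mirrors the per-package fields used in Godeps/Godeps.json.
type dep struct {
	ImportPath string
	Comment    string
	Rev        string
}

// godeps mirrors the top-level manifest fields needed here (assumes the
// standard godep "Deps" array).
type godeps struct {
	ImportPath string
	Deps       []dep
}

func main() {
	raw, err := ioutil.ReadFile("Godeps/Godeps.json")
	if err != nil {
		panic(err)
	}
	var g godeps
	if err := json.Unmarshal(raw, &g); err != nil {
		panic(err)
	}
	// Print the pinned version tag and revision for every vendored etcd package.
	for _, d := range g.Deps {
		if strings.HasPrefix(d.ImportPath, "github.com/coreos/etcd/") {
			fmt.Printf("%-60s %s %s\n", d.ImportPath, d.Comment, d.Rev)
		}
	}
}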
@@ -1,7 +1,7 @@
 {
   "ImportPath": "k8s.io/kubernetes",
   "GoVersion": "go1.7",
-  "GodepVersion": "v74",
+  "GodepVersion": "v79",
   "Packages": [
     "github.com/ugorji/go/codec/codecgen",
     "github.com/onsi/ginkgo/ginkgo",
@@ -431,263 +431,283 @@
 },
 {
   "ImportPath": "github.com/coreos/etcd/alarm",
-  "Comment": "v3.0.17",
-  "Rev": "cc198e22d3b8fd7ec98304c95e68ee375be54589"
+  "Comment": "v3.1.5",
+  "Rev": "20490caaf0dcd96bb4a95e40625559def8ef5b04"
 },
 {
   "ImportPath": "github.com/coreos/etcd/auth",
-  "Comment": "v3.0.17",
-  "Rev": "cc198e22d3b8fd7ec98304c95e68ee375be54589"
+  "Comment": "v3.1.5",
+  "Rev": "20490caaf0dcd96bb4a95e40625559def8ef5b04"
 },
 {
   "ImportPath": "github.com/coreos/etcd/auth/authpb",
-  "Comment": "v3.0.17",
-  "Rev": "cc198e22d3b8fd7ec98304c95e68ee375be54589"
+  "Comment": "v3.1.5",
+  "Rev": "20490caaf0dcd96bb4a95e40625559def8ef5b04"
 },
 {
   "ImportPath": "github.com/coreos/etcd/client",
-  "Comment": "v3.0.17",
-  "Rev": "cc198e22d3b8fd7ec98304c95e68ee375be54589"
+  "Comment": "v3.1.5",
+  "Rev": "20490caaf0dcd96bb4a95e40625559def8ef5b04"
 },
 {
   "ImportPath": "github.com/coreos/etcd/clientv3",
-  "Comment": "v3.0.17",
-  "Rev": "cc198e22d3b8fd7ec98304c95e68ee375be54589"
+  "Comment": "v3.1.5",
+  "Rev": "20490caaf0dcd96bb4a95e40625559def8ef5b04"
 },
 {
   "ImportPath": "github.com/coreos/etcd/compactor",
-  "Comment": "v3.0.17",
-  "Rev": "cc198e22d3b8fd7ec98304c95e68ee375be54589"
+  "Comment": "v3.1.5",
+  "Rev": "20490caaf0dcd96bb4a95e40625559def8ef5b04"
 },
 {
   "ImportPath": "github.com/coreos/etcd/discovery",
-  "Comment": "v3.0.17",
-  "Rev": "cc198e22d3b8fd7ec98304c95e68ee375be54589"
+  "Comment": "v3.1.5",
+  "Rev": "20490caaf0dcd96bb4a95e40625559def8ef5b04"
 },
 {
   "ImportPath": "github.com/coreos/etcd/error",
-  "Comment": "v3.0.17",
-  "Rev": "cc198e22d3b8fd7ec98304c95e68ee375be54589"
+  "Comment": "v3.1.5",
+  "Rev": "20490caaf0dcd96bb4a95e40625559def8ef5b04"
 },
 {
   "ImportPath": "github.com/coreos/etcd/etcdserver",
-  "Comment": "v3.0.17",
-  "Rev": "cc198e22d3b8fd7ec98304c95e68ee375be54589"
+  "Comment": "v3.1.5",
+  "Rev": "20490caaf0dcd96bb4a95e40625559def8ef5b04"
 },
 {
   "ImportPath": "github.com/coreos/etcd/etcdserver/api",
-  "Comment": "v3.0.17",
-  "Rev": "cc198e22d3b8fd7ec98304c95e68ee375be54589"
+  "Comment": "v3.1.5",
+  "Rev": "20490caaf0dcd96bb4a95e40625559def8ef5b04"
 },
 {
   "ImportPath": "github.com/coreos/etcd/etcdserver/api/v2http",
-  "Comment": "v3.0.17",
-  "Rev": "cc198e22d3b8fd7ec98304c95e68ee375be54589"
+  "Comment": "v3.1.5",
+  "Rev": "20490caaf0dcd96bb4a95e40625559def8ef5b04"
 },
 {
   "ImportPath": "github.com/coreos/etcd/etcdserver/api/v2http/httptypes",
-  "Comment": "v3.0.17",
-  "Rev": "cc198e22d3b8fd7ec98304c95e68ee375be54589"
+  "Comment": "v3.1.5",
+  "Rev": "20490caaf0dcd96bb4a95e40625559def8ef5b04"
 },
 {
   "ImportPath": "github.com/coreos/etcd/etcdserver/api/v3rpc",
-  "Comment": "v3.0.17",
-  "Rev": "cc198e22d3b8fd7ec98304c95e68ee375be54589"
+  "Comment": "v3.1.5",
+  "Rev": "20490caaf0dcd96bb4a95e40625559def8ef5b04"
 },
 {
   "ImportPath": "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes",
-  "Comment": "v3.0.17",
-  "Rev": "cc198e22d3b8fd7ec98304c95e68ee375be54589"
+  "Comment": "v3.1.5",
+  "Rev": "20490caaf0dcd96bb4a95e40625559def8ef5b04"
 },
 {
   "ImportPath": "github.com/coreos/etcd/etcdserver/auth",
-  "Comment": "v3.0.17",
-  "Rev": "cc198e22d3b8fd7ec98304c95e68ee375be54589"
+  "Comment": "v3.1.5",
+  "Rev": "20490caaf0dcd96bb4a95e40625559def8ef5b04"
 },
 {
   "ImportPath": "github.com/coreos/etcd/etcdserver/etcdserverpb",
-  "Comment": "v3.0.17",
-  "Rev": "cc198e22d3b8fd7ec98304c95e68ee375be54589"
+  "Comment": "v3.1.5",
+  "Rev": "20490caaf0dcd96bb4a95e40625559def8ef5b04"
 },
 {
   "ImportPath": "github.com/coreos/etcd/etcdserver/membership",
-  "Comment": "v3.0.17",
-  "Rev": "cc198e22d3b8fd7ec98304c95e68ee375be54589"
+  "Comment": "v3.1.5",
+  "Rev": "20490caaf0dcd96bb4a95e40625559def8ef5b04"
 },
 {
   "ImportPath": "github.com/coreos/etcd/etcdserver/stats",
-  "Comment": "v3.0.17",
-  "Rev": "cc198e22d3b8fd7ec98304c95e68ee375be54589"
+  "Comment": "v3.1.5",
+  "Rev": "20490caaf0dcd96bb4a95e40625559def8ef5b04"
 },
 {
   "ImportPath": "github.com/coreos/etcd/integration",
-  "Comment": "v3.0.17",
-  "Rev": "cc198e22d3b8fd7ec98304c95e68ee375be54589"
+  "Comment": "v3.1.5",
+  "Rev": "20490caaf0dcd96bb4a95e40625559def8ef5b04"
 },
 {
   "ImportPath": "github.com/coreos/etcd/lease",
-  "Comment": "v3.0.17",
-  "Rev": "cc198e22d3b8fd7ec98304c95e68ee375be54589"
+  "Comment": "v3.1.5",
+  "Rev": "20490caaf0dcd96bb4a95e40625559def8ef5b04"
 },
 {
   "ImportPath": "github.com/coreos/etcd/lease/leasehttp",
-  "Comment": "v3.0.17",
-  "Rev": "cc198e22d3b8fd7ec98304c95e68ee375be54589"
+  "Comment": "v3.1.5",
+  "Rev": "20490caaf0dcd96bb4a95e40625559def8ef5b04"
 },
 {
   "ImportPath": "github.com/coreos/etcd/lease/leasepb",
-  "Comment": "v3.0.17",
-  "Rev": "cc198e22d3b8fd7ec98304c95e68ee375be54589"
+  "Comment": "v3.1.5",
+  "Rev": "20490caaf0dcd96bb4a95e40625559def8ef5b04"
 },
 {
   "ImportPath": "github.com/coreos/etcd/mvcc",
-  "Comment": "v3.0.17",
-  "Rev": "cc198e22d3b8fd7ec98304c95e68ee375be54589"
+  "Comment": "v3.1.5",
+  "Rev": "20490caaf0dcd96bb4a95e40625559def8ef5b04"
 },
 {
   "ImportPath": "github.com/coreos/etcd/mvcc/backend",
-  "Comment": "v3.0.17",
-  "Rev": "cc198e22d3b8fd7ec98304c95e68ee375be54589"
+  "Comment": "v3.1.5",
+  "Rev": "20490caaf0dcd96bb4a95e40625559def8ef5b04"
 },
 {
   "ImportPath": "github.com/coreos/etcd/mvcc/mvccpb",
-  "Comment": "v3.0.17",
-  "Rev": "cc198e22d3b8fd7ec98304c95e68ee375be54589"
+  "Comment": "v3.1.5",
+  "Rev": "20490caaf0dcd96bb4a95e40625559def8ef5b04"
 },
 {
   "ImportPath": "github.com/coreos/etcd/pkg/adt",
-  "Comment": "v3.0.17",
-  "Rev": "cc198e22d3b8fd7ec98304c95e68ee375be54589"
+  "Comment": "v3.1.5",
+  "Rev": "20490caaf0dcd96bb4a95e40625559def8ef5b04"
 },
 {
   "ImportPath": "github.com/coreos/etcd/pkg/contention",
-  "Comment": "v3.0.17",
-  "Rev": "cc198e22d3b8fd7ec98304c95e68ee375be54589"
+  "Comment": "v3.1.5",
+  "Rev": "20490caaf0dcd96bb4a95e40625559def8ef5b04"
 },
+{
+  "ImportPath": "github.com/coreos/etcd/pkg/cpuutil",
+  "Comment": "v3.1.5",
+  "Rev": "20490caaf0dcd96bb4a95e40625559def8ef5b04"
+},
 {
   "ImportPath": "github.com/coreos/etcd/pkg/crc",
-  "Comment": "v3.0.17",
-  "Rev": "cc198e22d3b8fd7ec98304c95e68ee375be54589"
+  "Comment": "v3.1.5",
+  "Rev": "20490caaf0dcd96bb4a95e40625559def8ef5b04"
 },
 {
   "ImportPath": "github.com/coreos/etcd/pkg/fileutil",
-  "Comment": "v3.0.17",
-  "Rev": "cc198e22d3b8fd7ec98304c95e68ee375be54589"
+  "Comment": "v3.1.5",
+  "Rev": "20490caaf0dcd96bb4a95e40625559def8ef5b04"
 },
 {
   "ImportPath": "github.com/coreos/etcd/pkg/httputil",
-  "Comment": "v3.0.17",
-  "Rev": "cc198e22d3b8fd7ec98304c95e68ee375be54589"
+  "Comment": "v3.1.5",
+  "Rev": "20490caaf0dcd96bb4a95e40625559def8ef5b04"
 },
 {
   "ImportPath": "github.com/coreos/etcd/pkg/idutil",
-  "Comment": "v3.0.17",
-  "Rev": "cc198e22d3b8fd7ec98304c95e68ee375be54589"
+  "Comment": "v3.1.5",
+  "Rev": "20490caaf0dcd96bb4a95e40625559def8ef5b04"
 },
 {
   "ImportPath": "github.com/coreos/etcd/pkg/ioutil",
-  "Comment": "v3.0.17",
-  "Rev": "cc198e22d3b8fd7ec98304c95e68ee375be54589"
+  "Comment": "v3.1.5",
+  "Rev": "20490caaf0dcd96bb4a95e40625559def8ef5b04"
 },
 {
   "ImportPath": "github.com/coreos/etcd/pkg/logutil",
-  "Comment": "v3.0.17",
-  "Rev": "cc198e22d3b8fd7ec98304c95e68ee375be54589"
+  "Comment": "v3.1.5",
+  "Rev": "20490caaf0dcd96bb4a95e40625559def8ef5b04"
 },
+{
+  "ImportPath": "github.com/coreos/etcd/pkg/monotime",
+  "Comment": "v3.1.5",
+  "Rev": "20490caaf0dcd96bb4a95e40625559def8ef5b04"
+},
 {
   "ImportPath": "github.com/coreos/etcd/pkg/netutil",
-  "Comment": "v3.0.17",
-  "Rev": "cc198e22d3b8fd7ec98304c95e68ee375be54589"
+  "Comment": "v3.1.5",
+  "Rev": "20490caaf0dcd96bb4a95e40625559def8ef5b04"
 },
 {
   "ImportPath": "github.com/coreos/etcd/pkg/pathutil",
-  "Comment": "v3.0.17",
-  "Rev": "cc198e22d3b8fd7ec98304c95e68ee375be54589"
+  "Comment": "v3.1.5",
+  "Rev": "20490caaf0dcd96bb4a95e40625559def8ef5b04"
 },
 {
   "ImportPath": "github.com/coreos/etcd/pkg/pbutil",
-  "Comment": "v3.0.17",
-  "Rev": "cc198e22d3b8fd7ec98304c95e68ee375be54589"
+  "Comment": "v3.1.5",
+  "Rev": "20490caaf0dcd96bb4a95e40625559def8ef5b04"
 },
 {
   "ImportPath": "github.com/coreos/etcd/pkg/runtime",
-  "Comment": "v3.0.17",
-  "Rev": "cc198e22d3b8fd7ec98304c95e68ee375be54589"
+  "Comment": "v3.1.5",
+  "Rev": "20490caaf0dcd96bb4a95e40625559def8ef5b04"
 },
 {
   "ImportPath": "github.com/coreos/etcd/pkg/schedule",
-  "Comment": "v3.0.17",
-  "Rev": "cc198e22d3b8fd7ec98304c95e68ee375be54589"
+  "Comment": "v3.1.5",
+  "Rev": "20490caaf0dcd96bb4a95e40625559def8ef5b04"
 },
 {
   "ImportPath": "github.com/coreos/etcd/pkg/testutil",
-  "Comment": "v3.0.17",
-  "Rev": "cc198e22d3b8fd7ec98304c95e68ee375be54589"
+  "Comment": "v3.1.5",
+  "Rev": "20490caaf0dcd96bb4a95e40625559def8ef5b04"
 },
 {
   "ImportPath": "github.com/coreos/etcd/pkg/tlsutil",
-  "Comment": "v3.0.17",
-  "Rev": "cc198e22d3b8fd7ec98304c95e68ee375be54589"
+  "Comment": "v3.1.5",
+  "Rev": "20490caaf0dcd96bb4a95e40625559def8ef5b04"
 },
 {
   "ImportPath": "github.com/coreos/etcd/pkg/transport",
-  "Comment": "v3.0.17",
-  "Rev": "cc198e22d3b8fd7ec98304c95e68ee375be54589"
+  "Comment": "v3.1.5",
+  "Rev": "20490caaf0dcd96bb4a95e40625559def8ef5b04"
 },
 {
   "ImportPath": "github.com/coreos/etcd/pkg/types",
-  "Comment": "v3.0.17",
-  "Rev": "cc198e22d3b8fd7ec98304c95e68ee375be54589"
+  "Comment": "v3.1.5",
+  "Rev": "20490caaf0dcd96bb4a95e40625559def8ef5b04"
 },
 {
   "ImportPath": "github.com/coreos/etcd/pkg/wait",
-  "Comment": "v3.0.17",
-  "Rev": "cc198e22d3b8fd7ec98304c95e68ee375be54589"
+  "Comment": "v3.1.5",
+  "Rev": "20490caaf0dcd96bb4a95e40625559def8ef5b04"
 },
+{
+  "ImportPath": "github.com/coreos/etcd/proxy/grpcproxy",
+  "Comment": "v3.1.5",
+  "Rev": "20490caaf0dcd96bb4a95e40625559def8ef5b04"
+},
+{
+  "ImportPath": "github.com/coreos/etcd/proxy/grpcproxy/cache",
+  "Comment": "v3.1.5",
+  "Rev": "20490caaf0dcd96bb4a95e40625559def8ef5b04"
+},
 {
   "ImportPath": "github.com/coreos/etcd/raft",
-  "Comment": "v3.0.17",
-  "Rev": "cc198e22d3b8fd7ec98304c95e68ee375be54589"
+  "Comment": "v3.1.5",
+  "Rev": "20490caaf0dcd96bb4a95e40625559def8ef5b04"
 },
 {
   "ImportPath": "github.com/coreos/etcd/raft/raftpb",
-  "Comment": "v3.0.17",
-  "Rev": "cc198e22d3b8fd7ec98304c95e68ee375be54589"
+  "Comment": "v3.1.5",
+  "Rev": "20490caaf0dcd96bb4a95e40625559def8ef5b04"
 },
 {
   "ImportPath": "github.com/coreos/etcd/rafthttp",
-  "Comment": "v3.0.17",
-  "Rev": "cc198e22d3b8fd7ec98304c95e68ee375be54589"
+  "Comment": "v3.1.5",
+  "Rev": "20490caaf0dcd96bb4a95e40625559def8ef5b04"
 },
 {
   "ImportPath": "github.com/coreos/etcd/snap",
-  "Comment": "v3.0.17",
-  "Rev": "cc198e22d3b8fd7ec98304c95e68ee375be54589"
+  "Comment": "v3.1.5",
+  "Rev": "20490caaf0dcd96bb4a95e40625559def8ef5b04"
 },
 {
   "ImportPath": "github.com/coreos/etcd/snap/snappb",
-  "Comment": "v3.0.17",
-  "Rev": "cc198e22d3b8fd7ec98304c95e68ee375be54589"
+  "Comment": "v3.1.5",
+  "Rev": "20490caaf0dcd96bb4a95e40625559def8ef5b04"
 },
 {
   "ImportPath": "github.com/coreos/etcd/store",
-  "Comment": "v3.0.17",
-  "Rev": "cc198e22d3b8fd7ec98304c95e68ee375be54589"
+  "Comment": "v3.1.5",
+  "Rev": "20490caaf0dcd96bb4a95e40625559def8ef5b04"
 },
 {
   "ImportPath": "github.com/coreos/etcd/version",
-  "Comment": "v3.0.17",
-  "Rev": "cc198e22d3b8fd7ec98304c95e68ee375be54589"
+  "Comment": "v3.1.5",
+  "Rev": "20490caaf0dcd96bb4a95e40625559def8ef5b04"
 },
 {
   "ImportPath": "github.com/coreos/etcd/wal",
-  "Comment": "v3.0.17",
-  "Rev": "cc198e22d3b8fd7ec98304c95e68ee375be54589"
+  "Comment": "v3.1.5",
+  "Rev": "20490caaf0dcd96bb4a95e40625559def8ef5b04"
 },
 {
   "ImportPath": "github.com/coreos/etcd/wal/walpb",
-  "Comment": "v3.0.17",
-  "Rev": "cc198e22d3b8fd7ec98304c95e68ee375be54589"
+  "Comment": "v3.1.5",
+  "Rev": "20490caaf0dcd96bb4a95e40625559def8ef5b04"
 },
 {
   "ImportPath": "github.com/coreos/go-oidc/http",
@@ -765,8 +785,8 @@
 },
 {
   "ImportPath": "github.com/coreos/rkt/api/v1alpha",
-  "Comment": "v1.11.0-59-ga83419b",
-  "Rev": "a83419be28ac626876f94a28b4df2dbc9eac7448"
+  "Comment": "v1.25.0",
+  "Rev": "ec37f3cb649bfb72408906e7cbf330e4aeda1075"
 },
 {
   "ImportPath": "github.com/cpuguy83/go-md2man/md2man",
@@ -1037,128 +1057,128 @@
 },
 {
   "ImportPath": "github.com/gogo/protobuf/gogoproto",
-  "Comment": "v0.2-33-ge18d7aa",
-  "Rev": "e18d7aa8f8c624c915db340349aad4c49b10d173"
+  "Comment": "v0.4-3-gc0656ed",
+  "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
 },
 {
   "ImportPath": "github.com/gogo/protobuf/plugin/compare",
-  "Comment": "v0.2-33-ge18d7aa",
-  "Rev": "e18d7aa8f8c624c915db340349aad4c49b10d173"
+  "Comment": "v0.4-3-gc0656ed",
+  "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
 },
 {
   "ImportPath": "github.com/gogo/protobuf/plugin/defaultcheck",
-  "Comment": "v0.2-33-ge18d7aa",
-  "Rev": "e18d7aa8f8c624c915db340349aad4c49b10d173"
+  "Comment": "v0.4-3-gc0656ed",
+  "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
 },
 {
   "ImportPath": "github.com/gogo/protobuf/plugin/description",
-  "Comment": "v0.2-33-ge18d7aa",
-  "Rev": "e18d7aa8f8c624c915db340349aad4c49b10d173"
+  "Comment": "v0.4-3-gc0656ed",
+  "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
 },
 {
   "ImportPath": "github.com/gogo/protobuf/plugin/embedcheck",
-  "Comment": "v0.2-33-ge18d7aa",
-  "Rev": "e18d7aa8f8c624c915db340349aad4c49b10d173"
+  "Comment": "v0.4-3-gc0656ed",
+  "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
 },
 {
   "ImportPath": "github.com/gogo/protobuf/plugin/enumstringer",
-  "Comment": "v0.2-33-ge18d7aa",
-  "Rev": "e18d7aa8f8c624c915db340349aad4c49b10d173"
+  "Comment": "v0.4-3-gc0656ed",
+  "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
 },
 {
   "ImportPath": "github.com/gogo/protobuf/plugin/equal",
-  "Comment": "v0.2-33-ge18d7aa",
-  "Rev": "e18d7aa8f8c624c915db340349aad4c49b10d173"
+  "Comment": "v0.4-3-gc0656ed",
+  "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
 },
 {
   "ImportPath": "github.com/gogo/protobuf/plugin/face",
-  "Comment": "v0.2-33-ge18d7aa",
-  "Rev": "e18d7aa8f8c624c915db340349aad4c49b10d173"
+  "Comment": "v0.4-3-gc0656ed",
+  "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
 },
 {
   "ImportPath": "github.com/gogo/protobuf/plugin/gostring",
-  "Comment": "v0.2-33-ge18d7aa",
-  "Rev": "e18d7aa8f8c624c915db340349aad4c49b10d173"
+  "Comment": "v0.4-3-gc0656ed",
+  "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
 },
 {
   "ImportPath": "github.com/gogo/protobuf/plugin/marshalto",
-  "Comment": "v0.2-33-ge18d7aa",
-  "Rev": "e18d7aa8f8c624c915db340349aad4c49b10d173"
+  "Comment": "v0.4-3-gc0656ed",
+  "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
 },
 {
   "ImportPath": "github.com/gogo/protobuf/plugin/oneofcheck",
-  "Comment": "v0.2-33-ge18d7aa",
-  "Rev": "e18d7aa8f8c624c915db340349aad4c49b10d173"
+  "Comment": "v0.4-3-gc0656ed",
+  "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
 },
 {
   "ImportPath": "github.com/gogo/protobuf/plugin/populate",
-  "Comment": "v0.2-33-ge18d7aa",
-  "Rev": "e18d7aa8f8c624c915db340349aad4c49b10d173"
+  "Comment": "v0.4-3-gc0656ed",
+  "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
 },
 {
   "ImportPath": "github.com/gogo/protobuf/plugin/size",
-  "Comment": "v0.2-33-ge18d7aa",
-  "Rev": "e18d7aa8f8c624c915db340349aad4c49b10d173"
+  "Comment": "v0.4-3-gc0656ed",
+  "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
 },
 {
   "ImportPath": "github.com/gogo/protobuf/plugin/stringer",
-  "Comment": "v0.2-33-ge18d7aa",
-  "Rev": "e18d7aa8f8c624c915db340349aad4c49b10d173"
+  "Comment": "v0.4-3-gc0656ed",
+  "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
 },
 {
   "ImportPath": "github.com/gogo/protobuf/plugin/testgen",
-  "Comment": "v0.2-33-ge18d7aa",
-  "Rev": "e18d7aa8f8c624c915db340349aad4c49b10d173"
+  "Comment": "v0.4-3-gc0656ed",
+  "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
 },
 {
   "ImportPath": "github.com/gogo/protobuf/plugin/union",
-  "Comment": "v0.2-33-ge18d7aa",
-  "Rev": "e18d7aa8f8c624c915db340349aad4c49b10d173"
+  "Comment": "v0.4-3-gc0656ed",
+  "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
 },
 {
   "ImportPath": "github.com/gogo/protobuf/plugin/unmarshal",
-  "Comment": "v0.2-33-ge18d7aa",
-  "Rev": "e18d7aa8f8c624c915db340349aad4c49b10d173"
+  "Comment": "v0.4-3-gc0656ed",
+  "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
 },
 {
   "ImportPath": "github.com/gogo/protobuf/proto",
-  "Comment": "v0.2-33-ge18d7aa",
-  "Rev": "e18d7aa8f8c624c915db340349aad4c49b10d173"
+  "Comment": "v0.4-3-gc0656ed",
+  "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
 },
 {
   "ImportPath": "github.com/gogo/protobuf/protoc-gen-gogo/descriptor",
-  "Comment": "v0.2-33-ge18d7aa",
-  "Rev": "e18d7aa8f8c624c915db340349aad4c49b10d173"
+  "Comment": "v0.4-3-gc0656ed",
+  "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
 },
 {
   "ImportPath": "github.com/gogo/protobuf/protoc-gen-gogo/generator",
-  "Comment": "v0.2-33-ge18d7aa",
-  "Rev": "e18d7aa8f8c624c915db340349aad4c49b10d173"
+  "Comment": "v0.4-3-gc0656ed",
+  "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
 },
 {
   "ImportPath": "github.com/gogo/protobuf/protoc-gen-gogo/grpc",
-  "Comment": "v0.2-33-ge18d7aa",
-  "Rev": "e18d7aa8f8c624c915db340349aad4c49b10d173"
+  "Comment": "v0.4-3-gc0656ed",
+  "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
 },
 {
   "ImportPath": "github.com/gogo/protobuf/protoc-gen-gogo/plugin",
-  "Comment": "v0.2-33-ge18d7aa",
-  "Rev": "e18d7aa8f8c624c915db340349aad4c49b10d173"
+  "Comment": "v0.4-3-gc0656ed",
+  "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
 },
 {
   "ImportPath": "github.com/gogo/protobuf/sortkeys",
-  "Comment": "v0.2-33-ge18d7aa",
-  "Rev": "e18d7aa8f8c624c915db340349aad4c49b10d173"
+  "Comment": "v0.4-3-gc0656ed",
+  "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
 },
 {
   "ImportPath": "github.com/gogo/protobuf/vanity",
-  "Comment": "v0.2-33-ge18d7aa",
-  "Rev": "e18d7aa8f8c624c915db340349aad4c49b10d173"
+  "Comment": "v0.4-3-gc0656ed",
+  "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
 },
 {
   "ImportPath": "github.com/gogo/protobuf/vanity/command",
-  "Comment": "v0.2-33-ge18d7aa",
-  "Rev": "e18d7aa8f8c624c915db340349aad4c49b10d173"
+  "Comment": "v0.4-3-gc0656ed",
+  "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
 },
 {
   "ImportPath": "github.com/golang/glog",
@@ -1174,11 +1194,11 @@
 },
 {
   "ImportPath": "github.com/golang/protobuf/jsonpb",
-  "Rev": "8616e8ee5e20a1704615e6c8d7afcdac06087a67"
+  "Rev": "4bd1920723d7b7c925de087aa32e2187708897f7"
 },
 {
   "ImportPath": "github.com/golang/protobuf/proto",
-  "Rev": "8616e8ee5e20a1704615e6c8d7afcdac06087a67"
+  "Rev": "4bd1920723d7b7c925de087aa32e2187708897f7"
 },
 {
   "ImportPath": "github.com/google/btree",
@@ -1539,21 +1559,27 @@
 {
   "ImportPath": "github.com/gorilla/websocket",
   "Rev": "6eb6ad425a89d9da7a5549bc6da8f79ba5c17844"
+},
+{
+  "ImportPath": "github.com/grpc-ecosystem/go-grpc-prometheus",
+  "Comment": "v1.1-4-g2500245",
+  "Rev": "2500245aa6110c562d17020fb31a2c133d737799"
 },
 {
   "ImportPath": "github.com/grpc-ecosystem/grpc-gateway/runtime",
-  "Comment": "v1.0.0-8-gf52d055",
-  "Rev": "f52d055dc48aec25854ed7d31862f78913cf17d1"
+  "Comment": "v1.1.0-25-g84398b9",
+  "Rev": "84398b94e188ee336f307779b57b3aa91af7063c"
 },
 {
   "ImportPath": "github.com/grpc-ecosystem/grpc-gateway/runtime/internal",
-  "Comment": "v1.0.0-8-gf52d055",
-  "Rev": "f52d055dc48aec25854ed7d31862f78913cf17d1"
+  "Comment": "v1.1.0-25-g84398b9",
+  "Rev": "84398b94e188ee336f307779b57b3aa91af7063c"
 },
 {
   "ImportPath": "github.com/grpc-ecosystem/grpc-gateway/utilities",
-  "Comment": "v1.0.0-8-gf52d055",
-  "Rev": "f52d055dc48aec25854ed7d31862f78913cf17d1"
+  "Comment": "v1.1.0-25-g84398b9",
+  "Rev": "84398b94e188ee336f307779b57b3aa91af7063c"
 },
 {
   "ImportPath": "github.com/hashicorp/golang-lru",
@@ -1679,6 +1705,11 @@
   "ImportPath": "github.com/kardianos/osext",
   "Rev": "8fef92e41e22a70e700a96b29f066cda30ea24ef"
 },
+{
+  "ImportPath": "github.com/karlseguin/ccache",
+  "Comment": "v2.0.2-5-g3ba9789",
+  "Rev": "3ba9789cfd2cb7b4fb4657efc994cc1c599a648c"
+},
 {
   "ImportPath": "github.com/kr/fs",
   "Rev": "2788f0dbd16903de03cb8186e5c7d97b69ad387b"
@@ -2079,7 +2110,6 @@
 },
 {
   "ImportPath": "github.com/pelletier/go-buffruneio",
-  "Comment": "v0.1.0",
   "Rev": "df1e16fde7fc330a0ca68167c23bf7ed6ac31d6d"
 },
 {
@@ -2508,51 +2538,51 @@
 },
 {
   "ImportPath": "golang.org/x/net/context",
-  "Rev": "e90d6d0afc4c315a0d87a568ae68577cc15149a0"
+  "Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
 },
 {
   "ImportPath": "golang.org/x/net/context/ctxhttp",
-  "Rev": "e90d6d0afc4c315a0d87a568ae68577cc15149a0"
+  "Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
 },
 {
   "ImportPath": "golang.org/x/net/html",
-  "Rev": "e90d6d0afc4c315a0d87a568ae68577cc15149a0"
+  "Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
 },
 {
   "ImportPath": "golang.org/x/net/html/atom",
-  "Rev": "e90d6d0afc4c315a0d87a568ae68577cc15149a0"
+  "Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
 },
 {
   "ImportPath": "golang.org/x/net/http2",
-  "Rev": "e90d6d0afc4c315a0d87a568ae68577cc15149a0"
+  "Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
 },
 {
   "ImportPath": "golang.org/x/net/http2/hpack",
-  "Rev": "e90d6d0afc4c315a0d87a568ae68577cc15149a0"
+  "Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
 },
 {
   "ImportPath": "golang.org/x/net/idna",
-  "Rev": "e90d6d0afc4c315a0d87a568ae68577cc15149a0"
+  "Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
 },
 {
   "ImportPath": "golang.org/x/net/internal/timeseries",
-  "Rev": "e90d6d0afc4c315a0d87a568ae68577cc15149a0"
+  "Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
 },
 {
   "ImportPath": "golang.org/x/net/lex/httplex",
-  "Rev": "e90d6d0afc4c315a0d87a568ae68577cc15149a0"
+  "Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
 },
 {
   "ImportPath": "golang.org/x/net/proxy",
-  "Rev": "e90d6d0afc4c315a0d87a568ae68577cc15149a0"
+  "Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
 },
 {
   "ImportPath": "golang.org/x/net/trace",
-  "Rev": "e90d6d0afc4c315a0d87a568ae68577cc15149a0"
+  "Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
 },
 {
   "ImportPath": "golang.org/x/net/websocket",
-  "Rev": "e90d6d0afc4c315a0d87a568ae68577cc15149a0"
+  "Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
 },
 {
   "ImportPath": "golang.org/x/oauth2",
@@ -2638,6 +2668,10 @@
   "ImportPath": "golang.org/x/text/width",
   "Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
 },
+{
+  "ImportPath": "golang.org/x/time/rate",
+  "Rev": "f51c12702a4d776e4c1fa9b0fabab841babae631"
+},
 {
   "ImportPath": "google.golang.org/api/cloudmonitoring/v2beta2",
   "Rev": "55146ba61254fdb1c26d65ff3c04bc1611ad73fb"
@@ -2704,48 +2738,48 @@
 },
 {
   "ImportPath": "google.golang.org/grpc",
-  "Comment": "v1.0.0-183-g231b4cf",
-  "Rev": "231b4cfea0e79843053a33f5fe90bd4d84b23cd3"
+  "Comment": "v1.0.4",
+  "Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0"
 },
 {
   "ImportPath": "google.golang.org/grpc/codes",
-  "Comment": "v1.0.0-183-g231b4cf",
-  "Rev": "231b4cfea0e79843053a33f5fe90bd4d84b23cd3"
+  "Comment": "v1.0.4",
+  "Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0"
 },
 {
   "ImportPath": "google.golang.org/grpc/credentials",
-  "Comment": "v1.0.0-183-g231b4cf",
-  "Rev": "231b4cfea0e79843053a33f5fe90bd4d84b23cd3"
+  "Comment": "v1.0.4",
+  "Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0"
 },
 {
   "ImportPath": "google.golang.org/grpc/grpclog",
-  "Comment": "v1.0.0-183-g231b4cf",
-  "Rev": "231b4cfea0e79843053a33f5fe90bd4d84b23cd3"
+  "Comment": "v1.0.4",
+  "Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0"
 },
 {
   "ImportPath": "google.golang.org/grpc/internal",
-  "Comment": "v1.0.0-183-g231b4cf",
-  "Rev": "231b4cfea0e79843053a33f5fe90bd4d84b23cd3"
+  "Comment": "v1.0.4",
+  "Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0"
 },
 {
   "ImportPath": "google.golang.org/grpc/metadata",
-  "Comment": "v1.0.0-183-g231b4cf",
-  "Rev": "231b4cfea0e79843053a33f5fe90bd4d84b23cd3"
+  "Comment": "v1.0.4",
+  "Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0"
 },
 {
   "ImportPath": "google.golang.org/grpc/naming",
-  "Comment": "v1.0.0-183-g231b4cf",
-  "Rev": "231b4cfea0e79843053a33f5fe90bd4d84b23cd3"
+  "Comment": "v1.0.4",
+  "Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0"
 },
 {
   "ImportPath": "google.golang.org/grpc/peer",
-  "Comment": "v1.0.0-183-g231b4cf",
-  "Rev": "231b4cfea0e79843053a33f5fe90bd4d84b23cd3"
+  "Comment": "v1.0.4",
+  "Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0"
 },
 {
   "ImportPath": "google.golang.org/grpc/transport",
-  "Comment": "v1.0.0-183-g231b4cf",
-  "Rev": "231b4cfea0e79843053a33f5fe90bd4d84b23cd3"
+  "Comment": "v1.0.4",
+  "Rev": "777daa17ff9b5daef1cfdf915088a2ada3332bf0"
 },
 {
   "ImportPath": "gopkg.in/gcfg.v1",
Godeps/LICENSES · 1310 changed lines (generated) · file diff suppressed because it is too large

staging/src/k8s.io/client-go/Godeps/Godeps.json · 18 changed lines (generated)
@@ -100,11 +100,11 @@
 },
 {
   "ImportPath": "github.com/gogo/protobuf/proto",
-  "Rev": "e18d7aa8f8c624c915db340349aad4c49b10d173"
+  "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
 },
 {
   "ImportPath": "github.com/gogo/protobuf/sortkeys",
-  "Rev": "e18d7aa8f8c624c915db340349aad4c49b10d173"
+  "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
 },
 {
   "ImportPath": "github.com/golang/glog",
@@ -116,7 +116,7 @@
 },
 {
   "ImportPath": "github.com/golang/protobuf/proto",
-  "Rev": "8616e8ee5e20a1704615e6c8d7afcdac06087a67"
+  "Rev": "4bd1920723d7b7c925de087aa32e2187708897f7"
 },
 {
   "ImportPath": "github.com/google/gofuzz",
@@ -172,27 +172,27 @@
 },
 {
   "ImportPath": "golang.org/x/net/context",
-  "Rev": "e90d6d0afc4c315a0d87a568ae68577cc15149a0"
+  "Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
 },
 {
   "ImportPath": "golang.org/x/net/context/ctxhttp",
-  "Rev": "e90d6d0afc4c315a0d87a568ae68577cc15149a0"
+  "Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
 },
 {
   "ImportPath": "golang.org/x/net/http2",
-  "Rev": "e90d6d0afc4c315a0d87a568ae68577cc15149a0"
+  "Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
 },
 {
   "ImportPath": "golang.org/x/net/http2/hpack",
-  "Rev": "e90d6d0afc4c315a0d87a568ae68577cc15149a0"
+  "Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
 },
 {
   "ImportPath": "golang.org/x/net/idna",
-  "Rev": "e90d6d0afc4c315a0d87a568ae68577cc15149a0"
+  "Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
 },
 {
   "ImportPath": "golang.org/x/net/lex/httplex",
-  "Rev": "e90d6d0afc4c315a0d87a568ae68577cc15149a0"
+  "Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
 },
 {
   "ImportPath": "golang.org/x/oauth2",
|||||||
@@ -1,67 +0,0 @@
|
|||||||
/*
|
|
||||||
Copyright 2017 The Kubernetes Authors.
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package api
|
|
||||||
|
|
||||||
const (
|
|
||||||
// TolerationsAnnotationKey represents the key of tolerations data (json serialized)
|
|
||||||
// in the Annotations of a Pod.
|
|
||||||
TolerationsAnnotationKey string = "scheduler.alpha.kubernetes.io/tolerations"
|
|
||||||
|
|
||||||
// TaintsAnnotationKey represents the key of taints data (json serialized)
|
|
||||||
// in the Annotations of a Node.
|
|
||||||
TaintsAnnotationKey string = "scheduler.alpha.kubernetes.io/taints"
|
|
||||||
|
|
||||||
// SeccompPodAnnotationKey represents the key of a seccomp profile applied
|
|
||||||
// to all containers of a pod.
|
|
||||||
SeccompPodAnnotationKey string = "seccomp.security.alpha.kubernetes.io/pod"
|
|
||||||
|
|
||||||
// SeccompContainerAnnotationKeyPrefix represents the key of a seccomp profile applied
|
|
||||||
// to one container of a pod.
|
|
||||||
SeccompContainerAnnotationKeyPrefix string = "container.seccomp.security.alpha.kubernetes.io/"
|
|
||||||
|
|
||||||
// CreatedByAnnotation represents the key used to store the spec(json)
|
|
||||||
// used to create the resource.
|
|
||||||
CreatedByAnnotation = "kubernetes.io/created-by"
|
|
||||||
|
|
||||||
// PreferAvoidPodsAnnotationKey represents the key of preferAvoidPods data (json serialized)
|
|
||||||
// in the Annotations of a Node.
|
|
||||||
PreferAvoidPodsAnnotationKey string = "scheduler.alpha.kubernetes.io/preferAvoidPods"
|
|
||||||
|
|
||||||
// SysctlsPodAnnotationKey represents the key of sysctls which are set for the infrastructure
|
|
||||||
// container of a pod. The annotation value is a comma separated list of sysctl_name=value
|
|
||||||
// key-value pairs. Only a limited set of whitelisted and isolated sysctls is supported by
|
|
||||||
// the kubelet. Pods with other sysctls will fail to launch.
|
|
||||||
SysctlsPodAnnotationKey string = "security.alpha.kubernetes.io/sysctls"
|
|
||||||
|
|
||||||
// UnsafeSysctlsPodAnnotationKey represents the key of sysctls which are set for the infrastructure
|
|
||||||
// container of a pod. The annotation value is a comma separated list of sysctl_name=value
|
|
||||||
// key-value pairs. Unsafe sysctls must be explicitly enabled for a kubelet. They are properly
|
|
||||||
// namespaced to a pod or a container, but their isolation is usually unclear or weak. Their use
|
|
||||||
// is at-your-own-risk. Pods that attempt to set an unsafe sysctl that is not enabled for a kubelet
|
|
||||||
// will fail to launch.
|
|
||||||
UnsafeSysctlsPodAnnotationKey string = "security.alpha.kubernetes.io/unsafe-sysctls"
|
|
||||||
|
|
||||||
// ObjectTTLAnnotations represents a suggestion for kubelet for how long it can cache
|
|
||||||
// an object (e.g. secret, config map) before fetching it again from apiserver.
|
|
||||||
// This annotation can be attached to node.
|
|
||||||
ObjectTTLAnnotationKey string = "node.alpha.kubernetes.io/ttl"
|
|
||||||
|
|
||||||
// AffinityAnnotationKey represents the key of affinity data (json serialized)
|
|
||||||
// in the Annotations of a Pod.
|
|
||||||
// TODO: remove when alpha support for affinity is removed
|
|
||||||
AffinityAnnotationKey string = "scheduler.alpha.kubernetes.io/affinity"
|
|
||||||
)
|
|
||||||
@@ -429,6 +429,56 @@ func NodeSelectorRequirementsAsSelector(nsm []NodeSelectorRequirement) (labels.S
 	return selector, nil
 }
 
+const (
+	// TolerationsAnnotationKey represents the key of tolerations data (json serialized)
+	// in the Annotations of a Pod.
+	TolerationsAnnotationKey string = "scheduler.alpha.kubernetes.io/tolerations"
+
+	// TaintsAnnotationKey represents the key of taints data (json serialized)
+	// in the Annotations of a Node.
+	TaintsAnnotationKey string = "scheduler.alpha.kubernetes.io/taints"
+
+	// SeccompPodAnnotationKey represents the key of a seccomp profile applied
+	// to all containers of a pod.
+	SeccompPodAnnotationKey string = "seccomp.security.alpha.kubernetes.io/pod"
+
+	// SeccompContainerAnnotationKeyPrefix represents the key of a seccomp profile applied
+	// to one container of a pod.
+	SeccompContainerAnnotationKeyPrefix string = "container.seccomp.security.alpha.kubernetes.io/"
+
+	// CreatedByAnnotation represents the key used to store the spec(json)
+	// used to create the resource.
+	CreatedByAnnotation = "kubernetes.io/created-by"
+
+	// PreferAvoidPodsAnnotationKey represents the key of preferAvoidPods data (json serialized)
+	// in the Annotations of a Node.
+	PreferAvoidPodsAnnotationKey string = "scheduler.alpha.kubernetes.io/preferAvoidPods"
+
+	// SysctlsPodAnnotationKey represents the key of sysctls which are set for the infrastructure
+	// container of a pod. The annotation value is a comma separated list of sysctl_name=value
+	// key-value pairs. Only a limited set of whitelisted and isolated sysctls is supported by
+	// the kubelet. Pods with other sysctls will fail to launch.
+	SysctlsPodAnnotationKey string = "security.alpha.kubernetes.io/sysctls"
+
+	// UnsafeSysctlsPodAnnotationKey represents the key of sysctls which are set for the infrastructure
+	// container of a pod. The annotation value is a comma separated list of sysctl_name=value
+	// key-value pairs. Unsafe sysctls must be explicitly enabled for a kubelet. They are properly
+	// namespaced to a pod or a container, but their isolation is usually unclear or weak. Their use
+	// is at-your-own-risk. Pods that attempt to set an unsafe sysctl that is not enabled for a kubelet
+	// will fail to launch.
+	UnsafeSysctlsPodAnnotationKey string = "security.alpha.kubernetes.io/unsafe-sysctls"
+
+	// ObjectTTLAnnotations represents a suggestion for kubelet for how long it can cache
+	// an object (e.g. secret, config map) before fetching it again from apiserver.
+	// This annotation can be attached to node.
+	ObjectTTLAnnotationKey string = "node.alpha.kubernetes.io/ttl"
+
+	// AffinityAnnotationKey represents the key of affinity data (json serialized)
+	// in the Annotations of a Pod.
+	// TODO: remove when alpha support for affinity is removed
+	AffinityAnnotationKey string = "scheduler.alpha.kubernetes.io/affinity"
+)
+
 // GetTolerationsFromPodAnnotations gets the json serialized tolerations data from Pod.Annotations
 // and converts it to the []Toleration type in api.
 func GetTolerationsFromPodAnnotations(annotations map[string]string) ([]Toleration, error) {
@@ -444,7 +494,7 @@ func GetTolerationsFromPodAnnotations(annotations map[string]string) ([]Tolerati
 
 // AddOrUpdateTolerationInPod tries to add a toleration to the pod's toleration list.
 // Returns true if something was updated, false otherwise.
-func AddOrUpdateTolerationInPod(pod *Pod, toleration *Toleration) bool {
+func AddOrUpdateTolerationInPod(pod *Pod, toleration *Toleration) (bool, error) {
 	podTolerations := pod.Spec.Tolerations
 
 	var newTolerations []Toleration
@@ -452,7 +502,7 @@ func AddOrUpdateTolerationInPod(pod *Pod, toleration *Toleration) bool {
 	for i := range podTolerations {
 		if toleration.MatchToleration(&podTolerations[i]) {
 			if Semantic.DeepEqual(toleration, podTolerations[i]) {
-				return false
+				return false, nil
 			}
 			newTolerations = append(newTolerations, *toleration)
 			updated = true
@@ -467,7 +517,7 @@ func AddOrUpdateTolerationInPod(pod *Pod, toleration *Toleration) bool {
 	}
 
 	pod.Spec.Tolerations = newTolerations
-	return true
+	return true, nil
 }
 
 // MatchToleration checks if the toleration matches tolerationToMatch. Tolerations are unique by <key,effect,operator,value>,
@@ -17,9 +17,6 @@ limitations under the License.
 package api
 
 import (
-	"fmt"
-	"math"
-	"strconv"
 	"time"
 
 	"k8s.io/apimachinery/pkg/api/resource"
@@ -230,41 +227,3 @@ func PodRequestsAndLimits(pod *Pod) (reqs map[ResourceName]resource.Quantity, li
 	}
 	return
 }
-
-// ExtractContainerResourceValue extracts the value of a resource
-// in an already known container
-func ExtractContainerResourceValue(fs *ResourceFieldSelector, container *Container) (string, error) {
-	divisor := resource.Quantity{}
-	if divisor.Cmp(fs.Divisor) == 0 {
-		divisor = resource.MustParse("1")
-	} else {
-		divisor = fs.Divisor
-	}
-
-	switch fs.Resource {
-	case "limits.cpu":
-		return convertResourceCPUToString(container.Resources.Limits.Cpu(), divisor)
-	case "limits.memory":
-		return convertResourceMemoryToString(container.Resources.Limits.Memory(), divisor)
-	case "requests.cpu":
-		return convertResourceCPUToString(container.Resources.Requests.Cpu(), divisor)
-	case "requests.memory":
-		return convertResourceMemoryToString(container.Resources.Requests.Memory(), divisor)
-	}
-
-	return "", fmt.Errorf("unsupported container resource : %v", fs.Resource)
-}
-
-// convertResourceCPUToString converts cpu value to the format of divisor and returns
-// ceiling of the value.
-func convertResourceCPUToString(cpu *resource.Quantity, divisor resource.Quantity) (string, error) {
-	c := int64(math.Ceil(float64(cpu.MilliValue()) / float64(divisor.MilliValue())))
-	return strconv.FormatInt(c, 10), nil
-}
-
-// convertResourceMemoryToString converts memory value to the format of divisor and returns
-// ceiling of the value.
-func convertResourceMemoryToString(memory *resource.Quantity, divisor resource.Quantity) (string, error) {
-	m := int64(math.Ceil(float64(memory.Value()) / float64(divisor.Value())))
-	return strconv.FormatInt(m, 10), nil
-}
@@ -653,20 +653,10 @@ type ISCSIVolumeSource struct {
 	// the ReadOnly setting in VolumeMounts.
 	// +optional
 	ReadOnly bool
-	// Optional: list of iSCSI target portal ips for high availability.
+	// Required: list of iSCSI target portal ips for high availability.
 	// the portal is either an IP or ip_addr:port if port is other than default (typically TCP ports 860 and 3260)
 	// +optional
 	Portals []string
-	// Optional: whether support iSCSI Discovery CHAP authentication
-	// +optional
-	DiscoveryCHAPAuth bool
-	// Optional: whether support iSCSI Session CHAP authentication
-	// +optional
-	SessionCHAPAuth bool
-	// Optional: CHAP secret for iSCSI target and initiator authentication.
-	// The secret is used if either DiscoveryCHAPAuth or SessionCHAPAuth is true
-	// +optional
-	SecretRef *LocalObjectReference
 }
 
 // Represents a Fibre Channel volume.
File diff suppressed because it is too large
@@ -245,8 +245,6 @@ message ComponentStatus {
 
   // List of component conditions observed
   // +optional
-  // +patchMergeKey=type
-  // +patchStrategy=merge
   repeated ComponentCondition conditions = 2;
 }
 
@@ -417,8 +415,6 @@ message Container {
   // accessible from the network.
   // Cannot be updated.
   // +optional
-  // +patchMergeKey=containerPort
-  // +patchStrategy=merge
   repeated ContainerPort ports = 6;
 
   // List of sources to populate environment variables in the container.
@@ -433,8 +429,6 @@ message Container {
   // List of environment variables to set in the container.
   // Cannot be updated.
   // +optional
-  // +patchMergeKey=name
-  // +patchStrategy=merge
  repeated EnvVar env = 7;
 
   // Compute Resources required by this container.
@@ -446,8 +440,6 @@ message Container {
   // Pod volumes to mount into the container's filesystem.
   // Cannot be updated.
   // +optional
-  // +patchMergeKey=mountPath
-  // +patchStrategy=merge
   repeated VolumeMount volumeMounts = 9;
 
   // Periodic probe of container liveness.
@@ -1235,18 +1227,6 @@ message ISCSIVolumeSource {
   // is other than default (typically TCP ports 860 and 3260).
   // +optional
   repeated string portals = 7;
-
-  // whether support iSCSI Discovery CHAP authentication
-  // +optional
-  optional bool chapAuthDiscovery = 8;
-
-  // whether support iSCSI Session CHAP authentication
-  // +optional
-  optional bool chapAuthSession = 11;
-
-  // CHAP secret for iSCSI target and initiator authentication
-  // +optional
-  optional LocalObjectReference secretRef = 10;
 }
 
 // Maps a string key to a path within a volume.
@@ -1611,8 +1591,6 @@ message NodeSelector {
 // that relates the key and values.
 message NodeSelectorRequirement {
   // The label key that the selector applies to.
-  // +patchMergeKey=key
-  // +patchStrategy=merge
   optional string key = 1;
 
   // Represents a key's relationship to a set of values.
@@ -1680,16 +1658,12 @@ message NodeStatus {
   // Conditions is an array of current observed node conditions.
   // More info: http://releases.k8s.io/HEAD/docs/admin/node.md#node-condition
   // +optional
-  // +patchMergeKey=type
-  // +patchStrategy=merge
   repeated NodeCondition conditions = 4;
 
   // List of addresses reachable to the node.
   // Queried from cloud provider, if available.
   // More info: http://releases.k8s.io/HEAD/docs/admin/node.md#node-addresses
   // +optional
-  // +patchMergeKey=type
-  // +patchStrategy=merge
   repeated NodeAddress addresses = 5;
 
   // Endpoints of daemons running on the Node.
@@ -1895,8 +1869,6 @@ message ObjectMeta {
   // then an entry in this list will point to this controller, with the controller field set to true.
   // There cannot be more than one managing controller.
   // +optional
-  // +patchMergeKey=uid
-  // +patchStrategy=merge
   repeated k8s.io.apimachinery.pkg.apis.meta.v1.OwnerReference ownerReferences = 13;
 
   // Must be empty before the object is deleted from the registry. Each entry
@@ -1904,7 +1876,6 @@ message ObjectMeta {
   // from the list. If the deletionTimestamp of the object is non-nil, entries
   // in this list can only be removed.
   // +optional
-  // +patchStrategy=merge
   repeated string finalizers = 14;
 
   // The name of the cluster which the object belongs to.
@@ -2591,8 +2562,6 @@ message PodSpec {
   // List of volumes that can be mounted by containers belonging to the pod.
   // More info: http://kubernetes.io/docs/user-guide/volumes
   // +optional
-  // +patchMergeKey=name
-  // +patchStrategy=merge
   repeated Volume volumes = 1;
 
   // List of initialization containers belonging to the pod.
@@ -2608,8 +2577,6 @@ message PodSpec {
   // Init containers cannot currently be added or removed.
// Init containers cannot currently be added or removed.
|
// Init containers cannot currently be added or removed.
|
||||||
// Cannot be updated.
|
// Cannot be updated.
|
||||||
// More info: http://kubernetes.io/docs/user-guide/containers
|
// More info: http://kubernetes.io/docs/user-guide/containers
|
||||||
// +patchMergeKey=name
|
|
||||||
// +patchStrategy=merge
|
|
||||||
repeated Container initContainers = 20;
|
repeated Container initContainers = 20;
|
||||||
|
|
||||||
// List of containers belonging to the pod.
|
// List of containers belonging to the pod.
|
||||||
@@ -2617,8 +2584,6 @@ message PodSpec {
|
|||||||
// There must be at least one container in a Pod.
|
// There must be at least one container in a Pod.
|
||||||
// Cannot be updated.
|
// Cannot be updated.
|
||||||
// More info: http://kubernetes.io/docs/user-guide/containers
|
// More info: http://kubernetes.io/docs/user-guide/containers
|
||||||
// +patchMergeKey=name
|
|
||||||
// +patchStrategy=merge
|
|
||||||
repeated Container containers = 2;
|
repeated Container containers = 2;
|
||||||
|
|
||||||
// Restart policy for all containers within the pod.
|
// Restart policy for all containers within the pod.
|
||||||
@@ -2707,8 +2672,6 @@ message PodSpec {
|
|||||||
// in the case of docker, only DockerConfig type secrets are honored.
|
// in the case of docker, only DockerConfig type secrets are honored.
|
||||||
// More info: http://kubernetes.io/docs/user-guide/images#specifying-imagepullsecrets-on-a-pod
|
// More info: http://kubernetes.io/docs/user-guide/images#specifying-imagepullsecrets-on-a-pod
|
||||||
// +optional
|
// +optional
|
||||||
// +patchMergeKey=name
|
|
||||||
// +patchStrategy=merge
|
|
||||||
repeated LocalObjectReference imagePullSecrets = 15;
|
repeated LocalObjectReference imagePullSecrets = 15;
|
||||||
|
|
||||||
// Specifies the hostname of the Pod
|
// Specifies the hostname of the Pod
|
||||||
@@ -2746,8 +2709,6 @@ message PodStatus {
|
|||||||
// Current service state of pod.
|
// Current service state of pod.
|
||||||
// More info: http://kubernetes.io/docs/user-guide/pod-states#pod-conditions
|
// More info: http://kubernetes.io/docs/user-guide/pod-states#pod-conditions
|
||||||
// +optional
|
// +optional
|
||||||
// +patchMergeKey=type
|
|
||||||
// +patchStrategy=merge
|
|
||||||
repeated PodCondition conditions = 2;
|
repeated PodCondition conditions = 2;
|
||||||
|
|
||||||
// A human readable message indicating details about why the pod is in this condition.
|
// A human readable message indicating details about why the pod is in this condition.
|
||||||
@@ -3146,8 +3107,6 @@ message ReplicationControllerStatus {
|
|||||||
|
|
||||||
// Represents the latest available observations of a replication controller's current state.
|
// Represents the latest available observations of a replication controller's current state.
|
||||||
// +optional
|
// +optional
|
||||||
// +patchMergeKey=type
|
|
||||||
// +patchStrategy=merge
|
|
||||||
repeated ReplicationControllerCondition conditions = 6;
|
repeated ReplicationControllerCondition conditions = 6;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -3509,8 +3468,6 @@ message ServiceAccount {
|
|||||||
// Secrets is the list of secrets allowed to be used by pods running using this ServiceAccount.
|
// Secrets is the list of secrets allowed to be used by pods running using this ServiceAccount.
|
||||||
// More info: http://kubernetes.io/docs/user-guide/secrets
|
// More info: http://kubernetes.io/docs/user-guide/secrets
|
||||||
// +optional
|
// +optional
|
||||||
// +patchMergeKey=name
|
|
||||||
// +patchStrategy=merge
|
|
||||||
repeated ObjectReference secrets = 2;
|
repeated ObjectReference secrets = 2;
|
||||||
|
|
||||||
// ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images
|
// ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images
|
||||||
@@ -3601,8 +3558,6 @@ message ServiceProxyOptions {
|
|||||||
message ServiceSpec {
|
message ServiceSpec {
|
||||||
// The list of ports that are exposed by this service.
|
// The list of ports that are exposed by this service.
|
||||||
// More info: http://kubernetes.io/docs/user-guide/services#virtual-ips-and-service-proxies
|
// More info: http://kubernetes.io/docs/user-guide/services#virtual-ips-and-service-proxies
|
||||||
// +patchMergeKey=port
|
|
||||||
// +patchStrategy=merge
|
|
||||||
repeated ServicePort ports = 1;
|
repeated ServicePort ports = 1;
|
||||||
|
|
||||||
// Route service traffic to pods with label keys and values matching this
|
// Route service traffic to pods with label keys and values matching this
|
||||||
@@ -3722,8 +3677,6 @@ message TCPSocketAction {
|
|||||||
// any pod that that does not tolerate the Taint.
|
// any pod that that does not tolerate the Taint.
|
||||||
message Taint {
|
message Taint {
|
||||||
// Required. The taint key to be applied to a node.
|
// Required. The taint key to be applied to a node.
|
||||||
// +patchMergeKey=key
|
|
||||||
// +patchStrategy=merge
|
|
||||||
optional string key = 1;
|
optional string key = 1;
|
||||||
|
|
||||||
// Required. The taint value corresponding to the taint key.
|
// Required. The taint value corresponding to the taint key.
|
||||||
@@ -3747,8 +3700,6 @@ message Toleration {
|
|||||||
// Key is the taint key that the toleration applies to. Empty means match all taint keys.
|
// Key is the taint key that the toleration applies to. Empty means match all taint keys.
|
||||||
// If the key is empty, operator must be Exists; this combination means to match all values and all keys.
|
// If the key is empty, operator must be Exists; this combination means to match all values and all keys.
|
||||||
// +optional
|
// +optional
|
||||||
// +patchMergeKey=key
|
|
||||||
// +patchStrategy=merge
|
|
||||||
optional string key = 1;
|
optional string key = 1;
|
||||||
|
|
||||||
// Operator represents a key's relationship to the value.
|
// Operator represents a key's relationship to the value.
|
||||||
|
|||||||
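Editorial note: the proto hunks above only drop the +patchMergeKey/+patchStrategy comment markers from this vendored copy; the corresponding Go struct tags later in this diff are untouched. As a rough illustration of what those markers describe (not part of this commit), the sketch below reads the same directives back out of a struct tag with reflection; the demoSpec type and its field are invented for the example.

package main

import (
	"fmt"
	"reflect"
)

// demoSpec is a stand-in for a generated API type; the tag values mirror the
// patchStrategy/patchMergeKey directives the dropped comment markers describe.
type demoSpec struct {
	Containers []string `json:"containers" patchStrategy:"merge" patchMergeKey:"name"`
}

func main() {
	f, _ := reflect.TypeOf(demoSpec{}).FieldByName("Containers")
	// A strategic-merge-patch client merges list elements keyed by this field.
	fmt.Println("patchStrategy:", f.Tag.Get("patchStrategy"))
	fmt.Println("patchMergeKey:", f.Tag.Get("patchMergeKey"))
}
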
@@ -276,9 +276,9 @@ const (
     AffinityAnnotationKey string = "scheduler.alpha.kubernetes.io/affinity"
 )
 
-// AddOrUpdateTolerationInPod tries to add a toleration to the pod's toleration list.
-// Returns true if something was updated, false otherwise.
-func AddOrUpdateTolerationInPod(pod *Pod, toleration *Toleration) bool {
+// Tries to add a toleration to annotations list. Returns true if something was updated
+// false otherwise.
+func AddOrUpdateTolerationInPod(pod *Pod, toleration *Toleration) (bool, error) {
     podTolerations := pod.Spec.Tolerations
 
     var newTolerations []Toleration
@@ -286,7 +286,7 @@ func AddOrUpdateTolerationInPod(pod *Pod, toleration *Toleration) bool {
     for i := range podTolerations {
         if toleration.MatchToleration(&podTolerations[i]) {
             if api.Semantic.DeepEqual(toleration, podTolerations[i]) {
-                return false
+                return false, nil
             }
             newTolerations = append(newTolerations, *toleration)
             updated = true
@@ -301,7 +301,7 @@ func AddOrUpdateTolerationInPod(pod *Pod, toleration *Toleration) bool {
     }
 
     pod.Spec.Tolerations = newTolerations
-    return true
+    return true, nil
 }
 
 // MatchToleration checks if the toleration matches tolerationToMatch. Tolerations are unique by <key,effect,operator,value>,

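Editorial note: with the signature change above, callers of AddOrUpdateTolerationInPod must now handle the extra error value. A minimal sketch of the adjustment a hypothetical caller would make, assuming the vendored package path k8s.io/client-go/pkg/api/v1 used elsewhere in this diff (the wrapper function itself is invented for illustration):

package caller

import (
	"fmt"

	v1 "k8s.io/client-go/pkg/api/v1"
)

// ensureToleration adds t to the pod's tolerations and reports whether anything changed.
func ensureToleration(pod *v1.Pod, t *v1.Toleration) (bool, error) {
	// Previously this returned only a bool; it now returns (bool, error).
	updated, err := v1.AddOrUpdateTolerationInPod(pod, t)
	if err != nil {
		return false, fmt.Errorf("updating pod tolerations: %v", err)
	}
	return updated, nil
}
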
@@ -35,8 +35,6 @@ func (meta *ObjectMeta) GetUID() types.UID { return meta.UID }
 func (meta *ObjectMeta) SetUID(uid types.UID) { meta.UID = uid }
 func (meta *ObjectMeta) GetResourceVersion() string { return meta.ResourceVersion }
 func (meta *ObjectMeta) SetResourceVersion(version string) { meta.ResourceVersion = version }
-func (meta *ObjectMeta) GetGeneration() int64 { return meta.Generation }
-func (meta *ObjectMeta) SetGeneration(generation int64) { meta.Generation = generation }
 func (meta *ObjectMeta) GetSelfLink() string { return meta.SelfLink }
 func (meta *ObjectMeta) SetSelfLink(selfLink string) { meta.SelfLink = selfLink }
 func (meta *ObjectMeta) GetCreationTimestamp() metav1.Time { return meta.CreationTimestamp }
@@ -47,10 +45,6 @@ func (meta *ObjectMeta) GetDeletionTimestamp() *metav1.Time { return meta.Deleti
 func (meta *ObjectMeta) SetDeletionTimestamp(deletionTimestamp *metav1.Time) {
     meta.DeletionTimestamp = deletionTimestamp
 }
-func (meta *ObjectMeta) GetDeletionGracePeriodSeconds() *int64 { return meta.DeletionGracePeriodSeconds }
-func (meta *ObjectMeta) SetDeletionGracePeriodSeconds(deletionGracePeriodSeconds *int64) {
-    meta.DeletionGracePeriodSeconds = deletionGracePeriodSeconds
-}
 func (meta *ObjectMeta) GetLabels() map[string]string { return meta.Labels }
 func (meta *ObjectMeta) SetLabels(labels map[string]string) { meta.Labels = labels }
 func (meta *ObjectMeta) GetAnnotations() map[string]string { return meta.Annotations }

@@ -17,14 +17,10 @@ limitations under the License.
 package v1
 
 import (
-    "fmt"
-    "math"
-    "strconv"
     "time"
 
     "k8s.io/apimachinery/pkg/api/resource"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-    "k8s.io/client-go/pkg/api"
 )
 
 // Returns string version of ResourceName.
@@ -259,100 +255,3 @@ func GetResourceRequest(pod *Pod, resource ResourceName) int64 {
     }
     return totalResources
 }
-
-// ExtractResourceValueByContainerName extracts the value of a resource
-// by providing container name
-func ExtractResourceValueByContainerName(fs *ResourceFieldSelector, pod *Pod, containerName string) (string, error) {
-    container, err := findContainerInPod(pod, containerName)
-    if err != nil {
-        return "", err
-    }
-    return ExtractContainerResourceValue(fs, container)
-}
-
-// ExtractResourceValueByContainerNameAndNodeAllocatable extracts the value of a resource
-// by providing container name and node allocatable
-func ExtractResourceValueByContainerNameAndNodeAllocatable(fs *ResourceFieldSelector, pod *Pod, containerName string, nodeAllocatable ResourceList) (string, error) {
-    realContainer, err := findContainerInPod(pod, containerName)
-    if err != nil {
-        return "", err
-    }
-
-    containerCopy, err := api.Scheme.DeepCopy(realContainer)
-    if err != nil {
-        return "", fmt.Errorf("failed to perform a deep copy of container object: %v", err)
-    }
-
-    container, ok := containerCopy.(*Container)
-    if !ok {
-        return "", fmt.Errorf("unexpected type returned from deep copy of container object")
-    }
-
-    MergeContainerResourceLimits(container, nodeAllocatable)
-
-    return ExtractContainerResourceValue(fs, container)
-}
-
-// ExtractContainerResourceValue extracts the value of a resource
-// in an already known container
-func ExtractContainerResourceValue(fs *ResourceFieldSelector, container *Container) (string, error) {
-    divisor := resource.Quantity{}
-    if divisor.Cmp(fs.Divisor) == 0 {
-        divisor = resource.MustParse("1")
-    } else {
-        divisor = fs.Divisor
-    }
-
-    switch fs.Resource {
-    case "limits.cpu":
-        return convertResourceCPUToString(container.Resources.Limits.Cpu(), divisor)
-    case "limits.memory":
-        return convertResourceMemoryToString(container.Resources.Limits.Memory(), divisor)
-    case "requests.cpu":
-        return convertResourceCPUToString(container.Resources.Requests.Cpu(), divisor)
-    case "requests.memory":
-        return convertResourceMemoryToString(container.Resources.Requests.Memory(), divisor)
-    }
-
-    return "", fmt.Errorf("Unsupported container resource : %v", fs.Resource)
-}
-
-// convertResourceCPUToString converts cpu value to the format of divisor and returns
-// ceiling of the value.
-func convertResourceCPUToString(cpu *resource.Quantity, divisor resource.Quantity) (string, error) {
-    c := int64(math.Ceil(float64(cpu.MilliValue()) / float64(divisor.MilliValue())))
-    return strconv.FormatInt(c, 10), nil
-}
-
-// convertResourceMemoryToString converts memory value to the format of divisor and returns
-// ceiling of the value.
-func convertResourceMemoryToString(memory *resource.Quantity, divisor resource.Quantity) (string, error) {
-    m := int64(math.Ceil(float64(memory.Value()) / float64(divisor.Value())))
-    return strconv.FormatInt(m, 10), nil
-}
-
-// findContainerInPod finds a container by its name in the provided pod
-func findContainerInPod(pod *Pod, containerName string) (*Container, error) {
-    for _, container := range pod.Spec.Containers {
-        if container.Name == containerName {
-            return &container, nil
-        }
-    }
-    return nil, fmt.Errorf("container %s not found", containerName)
-}
-
-// MergeContainerResourceLimits checks if a limit is applied for
-// the container, and if not, it sets the limit to the passed resource list.
-func MergeContainerResourceLimits(container *Container,
-    allocatable ResourceList) {
-    if container.Resources.Limits == nil {
-        container.Resources.Limits = make(ResourceList)
-    }
-    for _, resource := range []ResourceName{ResourceCPU, ResourceMemory} {
-        if quantity, exists := container.Resources.Limits[resource]; !exists || quantity.IsZero() {
-            if cap, exists := allocatable[resource]; exists {
-                container.Resources.Limits[resource] = *cap.Copy()
-            }
-        }
-    }
-}

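Editorial note: the helpers deleted above backed downward-API style "limits.cpu" / "requests.memory" lookups in this vendored copy of client-go. The core arithmetic they performed — dividing a resource quantity by a divisor and rounding up — is small enough to sketch on its own. The snippet below reuses the same formula with resource.Quantity from apimachinery; it is only an illustration, not part of this commit.

package main

import (
	"fmt"
	"math"
	"strconv"

	"k8s.io/apimachinery/pkg/api/resource"
)

// cpuAsString mirrors the deleted convertResourceCPUToString: it returns
// ceil(cpu / divisor) expressed in whole units of the divisor.
func cpuAsString(cpu, divisor resource.Quantity) string {
	c := int64(math.Ceil(float64(cpu.MilliValue()) / float64(divisor.MilliValue())))
	return strconv.FormatInt(c, 10)
}

func main() {
	cpu := resource.MustParse("250m")
	divisor := resource.MustParse("1")     // whole cores
	fmt.Println(cpuAsString(cpu, divisor)) // prints "1" (rounded up)
}
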
@@ -15958,19 +15958,16 @@ func (x *ISCSIVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) {
     } else {
         yysep2 := !z.EncBinary()
         yy2arr2 := z.EncBasicHandle().StructToArray
-        var yyq2 [10]bool
+        var yyq2 [7]bool
         _, _, _ = yysep2, yyq2, yy2arr2
         const yyr2 bool = false
         yyq2[3] = x.ISCSIInterface != ""
         yyq2[4] = x.FSType != ""
         yyq2[5] = x.ReadOnly != false
         yyq2[6] = len(x.Portals) != 0
-        yyq2[7] = x.DiscoveryCHAPAuth != false
-        yyq2[8] = x.SessionCHAPAuth != false
-        yyq2[9] = x.SecretRef != nil
         var yynn2 int
         if yyr2 || yy2arr2 {
-            r.EncodeArrayStart(10)
+            r.EncodeArrayStart(7)
         } else {
             yynn2 = 3
             for _, b := range yyq2 {
@@ -16146,79 +16143,6 @@ func (x *ISCSIVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) {
                 }
             }
         }
-        if yyr2 || yy2arr2 {
-            z.EncSendContainerState(codecSelfer_containerArrayElem1234)
-            if yyq2[7] {
-                yym25 := z.EncBinary()
-                _ = yym25
-                if false {
-                } else {
-                    r.EncodeBool(bool(x.DiscoveryCHAPAuth))
-                }
-            } else {
-                r.EncodeBool(false)
-            }
-        } else {
-            if yyq2[7] {
-                z.EncSendContainerState(codecSelfer_containerMapKey1234)
-                r.EncodeString(codecSelferC_UTF81234, string("chapAuthDiscovery"))
-                z.EncSendContainerState(codecSelfer_containerMapValue1234)
-                yym26 := z.EncBinary()
-                _ = yym26
-                if false {
-                } else {
-                    r.EncodeBool(bool(x.DiscoveryCHAPAuth))
-                }
-            }
-        }
-        if yyr2 || yy2arr2 {
-            z.EncSendContainerState(codecSelfer_containerArrayElem1234)
-            if yyq2[8] {
-                yym28 := z.EncBinary()
-                _ = yym28
-                if false {
-                } else {
-                    r.EncodeBool(bool(x.SessionCHAPAuth))
-                }
-            } else {
-                r.EncodeBool(false)
-            }
-        } else {
-            if yyq2[8] {
-                z.EncSendContainerState(codecSelfer_containerMapKey1234)
-                r.EncodeString(codecSelferC_UTF81234, string("chapAuthSession"))
-                z.EncSendContainerState(codecSelfer_containerMapValue1234)
-                yym29 := z.EncBinary()
-                _ = yym29
-                if false {
-                } else {
-                    r.EncodeBool(bool(x.SessionCHAPAuth))
-                }
-            }
-        }
-        if yyr2 || yy2arr2 {
-            z.EncSendContainerState(codecSelfer_containerArrayElem1234)
-            if yyq2[9] {
-                if x.SecretRef == nil {
-                    r.EncodeNil()
-                } else {
-                    x.SecretRef.CodecEncodeSelf(e)
-                }
-            } else {
-                r.EncodeNil()
-            }
-        } else {
-            if yyq2[9] {
-                z.EncSendContainerState(codecSelfer_containerMapKey1234)
-                r.EncodeString(codecSelferC_UTF81234, string("secretRef"))
-                z.EncSendContainerState(codecSelfer_containerMapValue1234)
-                if x.SecretRef == nil {
-                    r.EncodeNil()
-                } else {
-                    x.SecretRef.CodecEncodeSelf(e)
-                }
-            }
-        }
         if yyr2 || yy2arr2 {
             z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
         } else {
@@ -16364,41 +16288,6 @@ func (x *ISCSIVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder)
                     z.F.DecSliceStringX(yyv16, false, d)
                 }
             }
-        case "chapAuthDiscovery":
-            if r.TryDecodeAsNil() {
-                x.DiscoveryCHAPAuth = false
-            } else {
-                yyv18 := &x.DiscoveryCHAPAuth
-                yym19 := z.DecBinary()
-                _ = yym19
-                if false {
-                } else {
-                    *((*bool)(yyv18)) = r.DecodeBool()
-                }
-            }
-        case "chapAuthSession":
-            if r.TryDecodeAsNil() {
-                x.SessionCHAPAuth = false
-            } else {
-                yyv20 := &x.SessionCHAPAuth
-                yym21 := z.DecBinary()
-                _ = yym21
-                if false {
-                } else {
-                    *((*bool)(yyv20)) = r.DecodeBool()
-                }
-            }
-        case "secretRef":
-            if r.TryDecodeAsNil() {
-                if x.SecretRef != nil {
-                    x.SecretRef = nil
-                }
-            } else {
-                if x.SecretRef == nil {
-                    x.SecretRef = new(LocalObjectReference)
-                }
-                x.SecretRef.CodecDecodeSelf(d)
-            }
         default:
             z.DecStructFieldNotFound(-1, yys3)
         } // end switch yys3
@@ -16410,16 +16299,16 @@ func (x *ISCSIVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder
     var h codecSelfer1234
     z, r := codec1978.GenHelperDecoder(d)
     _, _, _ = h, z, r
-    var yyj23 int
-    var yyb23 bool
-    var yyhl23 bool = l >= 0
-    yyj23++
-    if yyhl23 {
-        yyb23 = yyj23 > l
+    var yyj18 int
+    var yyb18 bool
+    var yyhl18 bool = l >= 0
+    yyj18++
+    if yyhl18 {
+        yyb18 = yyj18 > l
     } else {
-        yyb23 = r.CheckBreak()
+        yyb18 = r.CheckBreak()
     }
-    if yyb23 {
+    if yyb18 {
         z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
         return
     }
@@ -16427,21 +16316,21 @@ func (x *ISCSIVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder
     if r.TryDecodeAsNil() {
         x.TargetPortal = ""
     } else {
-        yyv24 := &x.TargetPortal
-        yym25 := z.DecBinary()
-        _ = yym25
+        yyv19 := &x.TargetPortal
+        yym20 := z.DecBinary()
+        _ = yym20
         if false {
         } else {
-            *((*string)(yyv24)) = r.DecodeString()
+            *((*string)(yyv19)) = r.DecodeString()
         }
     }
-    yyj23++
-    if yyhl23 {
-        yyb23 = yyj23 > l
+    yyj18++
+    if yyhl18 {
+        yyb18 = yyj18 > l
     } else {
-        yyb23 = r.CheckBreak()
+        yyb18 = r.CheckBreak()
     }
-    if yyb23 {
+    if yyb18 {
         z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
         return
     }
@@ -16449,21 +16338,21 @@ func (x *ISCSIVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder
     if r.TryDecodeAsNil() {
         x.IQN = ""
     } else {
-        yyv26 := &x.IQN
-        yym27 := z.DecBinary()
-        _ = yym27
+        yyv21 := &x.IQN
+        yym22 := z.DecBinary()
+        _ = yym22
         if false {
         } else {
-            *((*string)(yyv26)) = r.DecodeString()
+            *((*string)(yyv21)) = r.DecodeString()
         }
     }
-    yyj23++
-    if yyhl23 {
-        yyb23 = yyj23 > l
+    yyj18++
+    if yyhl18 {
+        yyb18 = yyj18 > l
     } else {
-        yyb23 = r.CheckBreak()
+        yyb18 = r.CheckBreak()
     }
-    if yyb23 {
+    if yyb18 {
         z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
         return
     }
@@ -16471,21 +16360,21 @@ func (x *ISCSIVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder
     if r.TryDecodeAsNil() {
         x.Lun = 0
     } else {
-        yyv28 := &x.Lun
-        yym29 := z.DecBinary()
-        _ = yym29
+        yyv23 := &x.Lun
+        yym24 := z.DecBinary()
+        _ = yym24
         if false {
         } else {
-            *((*int32)(yyv28)) = int32(r.DecodeInt(32))
+            *((*int32)(yyv23)) = int32(r.DecodeInt(32))
         }
     }
-    yyj23++
-    if yyhl23 {
-        yyb23 = yyj23 > l
+    yyj18++
+    if yyhl18 {
+        yyb18 = yyj18 > l
     } else {
-        yyb23 = r.CheckBreak()
+        yyb18 = r.CheckBreak()
     }
-    if yyb23 {
+    if yyb18 {
         z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
         return
     }
@@ -16493,21 +16382,21 @@ func (x *ISCSIVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder
     if r.TryDecodeAsNil() {
         x.ISCSIInterface = ""
     } else {
-        yyv30 := &x.ISCSIInterface
-        yym31 := z.DecBinary()
-        _ = yym31
+        yyv25 := &x.ISCSIInterface
+        yym26 := z.DecBinary()
+        _ = yym26
         if false {
         } else {
-            *((*string)(yyv30)) = r.DecodeString()
+            *((*string)(yyv25)) = r.DecodeString()
         }
     }
-    yyj23++
-    if yyhl23 {
-        yyb23 = yyj23 > l
+    yyj18++
+    if yyhl18 {
+        yyb18 = yyj18 > l
     } else {
-        yyb23 = r.CheckBreak()
+        yyb18 = r.CheckBreak()
     }
-    if yyb23 {
+    if yyb18 {
         z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
         return
     }
@@ -16515,21 +16404,21 @@ func (x *ISCSIVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder
     if r.TryDecodeAsNil() {
         x.FSType = ""
     } else {
-        yyv32 := &x.FSType
-        yym33 := z.DecBinary()
-        _ = yym33
+        yyv27 := &x.FSType
+        yym28 := z.DecBinary()
+        _ = yym28
         if false {
         } else {
-            *((*string)(yyv32)) = r.DecodeString()
+            *((*string)(yyv27)) = r.DecodeString()
         }
     }
-    yyj23++
-    if yyhl23 {
-        yyb23 = yyj23 > l
+    yyj18++
+    if yyhl18 {
+        yyb18 = yyj18 > l
     } else {
-        yyb23 = r.CheckBreak()
+        yyb18 = r.CheckBreak()
     }
-    if yyb23 {
+    if yyb18 {
         z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
         return
     }
@@ -16537,21 +16426,21 @@ func (x *ISCSIVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder
     if r.TryDecodeAsNil() {
         x.ReadOnly = false
     } else {
-        yyv34 := &x.ReadOnly
-        yym35 := z.DecBinary()
-        _ = yym35
+        yyv29 := &x.ReadOnly
+        yym30 := z.DecBinary()
+        _ = yym30
         if false {
         } else {
-            *((*bool)(yyv34)) = r.DecodeBool()
+            *((*bool)(yyv29)) = r.DecodeBool()
         }
     }
-    yyj23++
-    if yyhl23 {
-        yyb23 = yyj23 > l
+    yyj18++
+    if yyhl18 {
+        yyb18 = yyj18 > l
     } else {
-        yyb23 = r.CheckBreak()
+        yyb18 = r.CheckBreak()
     }
-    if yyb23 {
+    if yyb18 {
         z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
         return
     }
@@ -16559,91 +16448,26 @@ func (x *ISCSIVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder
     if r.TryDecodeAsNil() {
         x.Portals = nil
     } else {
-        yyv36 := &x.Portals
-        yym37 := z.DecBinary()
-        _ = yym37
+        yyv31 := &x.Portals
+        yym32 := z.DecBinary()
+        _ = yym32
         if false {
         } else {
-            z.F.DecSliceStringX(yyv36, false, d)
+            z.F.DecSliceStringX(yyv31, false, d)
         }
     }
-    yyj23++
-    if yyhl23 {
-        yyb23 = yyj23 > l
-    } else {
-        yyb23 = r.CheckBreak()
-    }
-    if yyb23 {
-        z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
-        return
-    }
-    z.DecSendContainerState(codecSelfer_containerArrayElem1234)
-    if r.TryDecodeAsNil() {
-        x.DiscoveryCHAPAuth = false
-    } else {
-        yyv38 := &x.DiscoveryCHAPAuth
-        yym39 := z.DecBinary()
-        _ = yym39
-        if false {
-        } else {
-            *((*bool)(yyv38)) = r.DecodeBool()
-        }
-    }
-    yyj23++
-    if yyhl23 {
-        yyb23 = yyj23 > l
-    } else {
-        yyb23 = r.CheckBreak()
-    }
-    if yyb23 {
-        z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
-        return
-    }
-    z.DecSendContainerState(codecSelfer_containerArrayElem1234)
-    if r.TryDecodeAsNil() {
-        x.SessionCHAPAuth = false
-    } else {
-        yyv40 := &x.SessionCHAPAuth
-        yym41 := z.DecBinary()
-        _ = yym41
-        if false {
-        } else {
-            *((*bool)(yyv40)) = r.DecodeBool()
-        }
-    }
-    yyj23++
-    if yyhl23 {
-        yyb23 = yyj23 > l
-    } else {
-        yyb23 = r.CheckBreak()
-    }
-    if yyb23 {
-        z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
-        return
-    }
-    z.DecSendContainerState(codecSelfer_containerArrayElem1234)
-    if r.TryDecodeAsNil() {
-        if x.SecretRef != nil {
-            x.SecretRef = nil
-        }
-    } else {
-        if x.SecretRef == nil {
-            x.SecretRef = new(LocalObjectReference)
-        }
-        x.SecretRef.CodecDecodeSelf(d)
-    }
     for {
-        yyj23++
-        if yyhl23 {
-            yyb23 = yyj23 > l
+        yyj18++
+        if yyhl18 {
+            yyb18 = yyj18 > l
         } else {
-            yyb23 = r.CheckBreak()
+            yyb18 = r.CheckBreak()
         }
-        if yyb23 {
+        if yyb18 {
             break
         }
         z.DecSendContainerState(codecSelfer_containerArrayElem1234)
-        z.DecStructFieldNotFound(yyj23-1, "")
+        z.DecStructFieldNotFound(yyj18-1, "")
     }
     z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
 }

@@ -197,8 +197,6 @@ type ObjectMeta struct {
     // then an entry in this list will point to this controller, with the controller field set to true.
     // There cannot be more than one managing controller.
     // +optional
-    // +patchMergeKey=uid
-    // +patchStrategy=merge
     OwnerReferences []metav1.OwnerReference `json:"ownerReferences,omitempty" patchStrategy:"merge" patchMergeKey:"uid" protobuf:"bytes,13,rep,name=ownerReferences"`
 
     // Must be empty before the object is deleted from the registry. Each entry
@@ -206,7 +204,6 @@ type ObjectMeta struct {
     // from the list. If the deletionTimestamp of the object is non-nil, entries
     // in this list can only be removed.
     // +optional
-    // +patchStrategy=merge
     Finalizers []string `json:"finalizers,omitempty" patchStrategy:"merge" protobuf:"bytes,14,rep,name=finalizers"`
 
     // The name of the cluster which the object belongs to.
@@ -1050,15 +1047,6 @@ type ISCSIVolumeSource struct {
     // is other than default (typically TCP ports 860 and 3260).
     // +optional
     Portals []string `json:"portals,omitempty" protobuf:"bytes,7,opt,name=portals"`
-    // whether support iSCSI Discovery CHAP authentication
-    // +optional
-    DiscoveryCHAPAuth bool `json:"chapAuthDiscovery,omitempty" protobuf:"varint,8,opt,name=chapAuthDiscovery"`
-    // whether support iSCSI Session CHAP authentication
-    // +optional
-    SessionCHAPAuth bool `json:"chapAuthSession,omitempty" protobuf:"varint,11,opt,name=chapAuthSession"`
-    // CHAP secret for iSCSI target and initiator authentication
-    // +optional
-    SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,10,opt,name=secretRef"`
 }
 
 // Represents a Fibre Channel volume.
@@ -1649,8 +1637,6 @@ type Container struct {
     // accessible from the network.
     // Cannot be updated.
     // +optional
-    // +patchMergeKey=containerPort
-    // +patchStrategy=merge
     Ports []ContainerPort `json:"ports,omitempty" patchStrategy:"merge" patchMergeKey:"containerPort" protobuf:"bytes,6,rep,name=ports"`
     // List of sources to populate environment variables in the container.
     // The keys defined within a source must be a C_IDENTIFIER. All invalid keys
@@ -1663,8 +1649,6 @@ type Container struct {
     // List of environment variables to set in the container.
     // Cannot be updated.
     // +optional
-    // +patchMergeKey=name
-    // +patchStrategy=merge
     Env []EnvVar `json:"env,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,7,rep,name=env"`
     // Compute Resources required by this container.
     // Cannot be updated.
@@ -1674,8 +1658,6 @@ type Container struct {
     // Pod volumes to mount into the container's filesystem.
     // Cannot be updated.
     // +optional
-    // +patchMergeKey=mountPath
-    // +patchStrategy=merge
     VolumeMounts []VolumeMount `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"mountPath" protobuf:"bytes,9,rep,name=volumeMounts"`
     // Periodic probe of container liveness.
     // Container will be restarted if the probe fails.
@@ -1996,8 +1978,6 @@ type NodeSelectorTerm struct {
 // that relates the key and values.
 type NodeSelectorRequirement struct {
     // The label key that the selector applies to.
-    // +patchMergeKey=key
-    // +patchStrategy=merge
     Key string `json:"key" patchStrategy:"merge" patchMergeKey:"key" protobuf:"bytes,1,opt,name=key"`
     // Represents a key's relationship to a set of values.
     // Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.
@@ -2182,8 +2162,6 @@ type PreferredSchedulingTerm struct {
 // any pod that that does not tolerate the Taint.
 type Taint struct {
     // Required. The taint key to be applied to a node.
-    // +patchMergeKey=key
-    // +patchStrategy=merge
     Key string `json:"key" patchStrategy:"merge" patchMergeKey:"key" protobuf:"bytes,1,opt,name=key"`
     // Required. The taint value corresponding to the taint key.
     // +optional
@@ -2226,8 +2204,6 @@ type Toleration struct {
     // Key is the taint key that the toleration applies to. Empty means match all taint keys.
     // If the key is empty, operator must be Exists; this combination means to match all values and all keys.
     // +optional
-    // +patchMergeKey=key
-    // +patchStrategy=merge
     Key string `json:"key,omitempty" patchStrategy:"merge" patchMergeKey:"key" protobuf:"bytes,1,opt,name=key"`
     // Operator represents a key's relationship to the value.
     // Valid operators are Exists and Equal. Defaults to Equal.
@@ -2285,8 +2261,6 @@ type PodSpec struct {
     // List of volumes that can be mounted by containers belonging to the pod.
     // More info: http://kubernetes.io/docs/user-guide/volumes
     // +optional
-    // +patchMergeKey=name
-    // +patchStrategy=merge
     Volumes []Volume `json:"volumes,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,1,rep,name=volumes"`
     // List of initialization containers belonging to the pod.
     // Init containers are executed in order prior to containers being started. If any
@@ -2301,16 +2275,12 @@ type PodSpec struct {
     // Init containers cannot currently be added or removed.
     // Cannot be updated.
    // More info: http://kubernetes.io/docs/user-guide/containers
-    // +patchMergeKey=name
-    // +patchStrategy=merge
     InitContainers []Container `json:"initContainers,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,20,rep,name=initContainers"`
     // List of containers belonging to the pod.
     // Containers cannot currently be added or removed.
     // There must be at least one container in a Pod.
     // Cannot be updated.
     // More info: http://kubernetes.io/docs/user-guide/containers
-    // +patchMergeKey=name
-    // +patchStrategy=merge
     Containers []Container `json:"containers" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=containers"`
     // Restart policy for all containers within the pod.
     // One of Always, OnFailure, Never.
@@ -2387,8 +2357,6 @@ type PodSpec struct {
     // in the case of docker, only DockerConfig type secrets are honored.
     // More info: http://kubernetes.io/docs/user-guide/images#specifying-imagepullsecrets-on-a-pod
     // +optional
-    // +patchMergeKey=name
-    // +patchStrategy=merge
     ImagePullSecrets []LocalObjectReference `json:"imagePullSecrets,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,15,rep,name=imagePullSecrets"`
     // Specifies the hostname of the Pod
     // If not specified, the pod's hostname will be set to a system-defined value.
@@ -2476,8 +2444,6 @@ type PodStatus struct {
     // Current service state of pod.
     // More info: http://kubernetes.io/docs/user-guide/pod-states#pod-conditions
     // +optional
-    // +patchMergeKey=type
-    // +patchStrategy=merge
     Conditions []PodCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,2,rep,name=conditions"`
     // A human readable message indicating details about why the pod is in this condition.
     // +optional
@@ -2674,8 +2640,6 @@ type ReplicationControllerStatus struct {
 
     // Represents the latest available observations of a replication controller's current state.
     // +optional
-    // +patchMergeKey=type
-    // +patchStrategy=merge
     Conditions []ReplicationControllerCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,6,rep,name=conditions"`
 }
 
@@ -2813,8 +2777,6 @@ type LoadBalancerIngress struct {
 type ServiceSpec struct {
     // The list of ports that are exposed by this service.
     // More info: http://kubernetes.io/docs/user-guide/services#virtual-ips-and-service-proxies
-    // +patchMergeKey=port
-    // +patchStrategy=merge
     Ports []ServicePort `json:"ports,omitempty" patchStrategy:"merge" patchMergeKey:"port" protobuf:"bytes,1,rep,name=ports"`
 
     // Route service traffic to pods with label keys and values matching this
@@ -3000,8 +2962,6 @@ type ServiceAccount struct {
     // Secrets is the list of secrets allowed to be used by pods running using this ServiceAccount.
     // More info: http://kubernetes.io/docs/user-guide/secrets
     // +optional
-    // +patchMergeKey=name
-    // +patchStrategy=merge
     Secrets []ObjectReference `json:"secrets,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=secrets"`
 
     // ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images
@@ -3222,15 +3182,11 @@ type NodeStatus struct {
     // Conditions is an array of current observed node conditions.
     // More info: http://releases.k8s.io/HEAD/docs/admin/node.md#node-condition
     // +optional
-    // +patchMergeKey=type
-    // +patchStrategy=merge
     Conditions []NodeCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,4,rep,name=conditions"`
     // List of addresses reachable to the node.
     // Queried from cloud provider, if available.
     // More info: http://releases.k8s.io/HEAD/docs/admin/node.md#node-addresses
     // +optional
-    // +patchMergeKey=type
-    // +patchStrategy=merge
     Addresses []NodeAddress `json:"addresses,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,5,rep,name=addresses"`
     // Endpoints of daemons running on the Node.
     // +optional
@@ -4279,8 +4235,6 @@ type ComponentStatus struct {
 
     // List of component conditions observed
     // +optional
-    // +patchMergeKey=type
-    // +patchStrategy=merge
     Conditions []ComponentCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,2,rep,name=conditions"`
 }
 
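Editorial note: after the hunk above, the vendored v1.ISCSIVolumeSource keeps only the seven fields that remain (target portal, IQN, LUN, interface, fsType, readOnly, portals); the CHAP-related fields are gone from this copy, so code that referenced them would no longer compile against it. A rough sketch of constructing the trimmed type, assuming the vendored import path k8s.io/client-go/pkg/api/v1 seen elsewhere in this diff (all field values below are invented for illustration):

package main

import (
	"fmt"

	v1 "k8s.io/client-go/pkg/api/v1"
)

func main() {
	// Only the fields kept by this change are set; DiscoveryCHAPAuth,
	// SessionCHAPAuth and SecretRef no longer exist on this vendored type.
	src := v1.ISCSIVolumeSource{
		TargetPortal:   "10.0.0.1:3260",
		IQN:            "iqn.2001-04.com.example:storage.disk1",
		Lun:            0,
		ISCSIInterface: "default",
		FSType:         "ext4",
		ReadOnly:       true,
		Portals:        []string{"10.0.0.2:3260"},
	}
	fmt.Printf("%+v\n", src)
}
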
@@ -650,17 +650,14 @@ func (HostPathVolumeSource) SwaggerDoc() map[string]string {
 }
 
 var map_ISCSIVolumeSource = map[string]string{
     "": "Represents an ISCSI disk. ISCSI volumes can only be mounted as read/write once. ISCSI volumes support ownership management and SELinux relabeling.",
     "targetPortal": "iSCSI target portal. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).",
     "iqn": "Target iSCSI Qualified Name.",
     "lun": "iSCSI target lun number.",
     "iscsiInterface": "Optional: Defaults to 'default' (tcp). iSCSI interface name that uses an iSCSI transport.",
     "fsType": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: http://kubernetes.io/docs/user-guide/volumes#iscsi",
     "readOnly": "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false.",
     "portals": "iSCSI target portal List. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).",
-    "chapAuthDiscovery": "whether support iSCSI Discovery CHAP authentication",
-    "chapAuthSession": "whether support iSCSI Session CHAP authentication",
-    "secretRef": "CHAP secret for iSCSI target and initiator authentication",
 }
 
 func (ISCSIVolumeSource) SwaggerDoc() map[string]string {

@@ -1706,9 +1706,6 @@ func autoConvert_v1_ISCSIVolumeSource_To_api_ISCSIVolumeSource(in *ISCSIVolumeSo
     out.FSType = in.FSType
     out.ReadOnly = in.ReadOnly
     out.Portals = *(*[]string)(unsafe.Pointer(&in.Portals))
-    out.DiscoveryCHAPAuth = in.DiscoveryCHAPAuth
-    out.SessionCHAPAuth = in.SessionCHAPAuth
-    out.SecretRef = (*api.LocalObjectReference)(unsafe.Pointer(in.SecretRef))
     return nil
 }
 
@@ -1724,9 +1721,6 @@ func autoConvert_api_ISCSIVolumeSource_To_v1_ISCSIVolumeSource(in *api.ISCSIVolu
     out.FSType = in.FSType
     out.ReadOnly = in.ReadOnly
     out.Portals = *(*[]string)(unsafe.Pointer(&in.Portals))
-    out.DiscoveryCHAPAuth = in.DiscoveryCHAPAuth
-    out.SessionCHAPAuth = in.SessionCHAPAuth
-    out.SecretRef = (*LocalObjectReference)(unsafe.Pointer(in.SecretRef))
     return nil
 }
 
@@ -1198,11 +1198,6 @@ func DeepCopy_v1_ISCSIVolumeSource(in interface{}, out interface{}, c *conversio
             *out = make([]string, len(*in))
             copy(*out, *in)
         }
-        if in.SecretRef != nil {
-            in, out := &in.SecretRef, &out.SecretRef
-            *out = new(LocalObjectReference)
-            **out = **in
-        }
         return nil
     }
 }

@@ -1226,11 +1226,6 @@ func DeepCopy_api_ISCSIVolumeSource(in interface{}, out interface{}, c *conversi
 			*out = make([]string, len(*in))
 			copy(*out, *in)
 		}
-		if in.SecretRef != nil {
-			in, out := &in.SecretRef, &out.SecretRef
-			*out = new(LocalObjectReference)
-			**out = **in
-		}
 		return nil
 	}
 }
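The deleted block in both DeepCopy hunks is the usual generated idiom for copying an optional pointer field: allocate a fresh value and copy the pointee so the clone shares no memory with the original. A hand-written sketch of the same idiom follows, with invented types standing in for the generated ones.

package main

import "fmt"

type LocalObjectReference struct{ Name string }

type Source struct {
	Portals   []string
	SecretRef *LocalObjectReference
}

// deepCopySource duplicates every field, including the optional pointer, so
// mutating the copy never touches the original.
func deepCopySource(in *Source) *Source {
	out := new(Source)
	if in.Portals != nil {
		out.Portals = make([]string, len(in.Portals))
		copy(out.Portals, in.Portals)
	}
	if in.SecretRef != nil {
		ref := *in.SecretRef // copy the pointee, not the pointer
		out.SecretRef = &ref
	}
	return out
}

func main() {
	orig := &Source{Portals: []string{"10.0.0.1:3260"}, SecretRef: &LocalObjectReference{Name: "chap-secret"}}
	clone := deepCopySource(orig)
	clone.SecretRef.Name = "changed"
	fmt.Println(orig.SecretRef.Name, clone.SecretRef.Name) // chap-secret changed
}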
File diff suppressed because it is too large
@@ -167,8 +167,6 @@ message DeploymentStatus {
   optional int32 unavailableReplicas = 5;

   // Represents the latest available observations of a deployment's current state.
-  // +patchMergeKey=type
-  // +patchStrategy=merge
   repeated DeploymentCondition conditions = 6;
 }

@@ -327,8 +327,6 @@ type DeploymentStatus struct {
 	UnavailableReplicas int32 `json:"unavailableReplicas,omitempty" protobuf:"varint,5,opt,name=unavailableReplicas"`

 	// Represents the latest available observations of a deployment's current state.
-	// +patchMergeKey=type
-	// +patchStrategy=merge
 	Conditions []DeploymentCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,6,rep,name=conditions"`
 }

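The two +patchMergeKey/+patchStrategy comment markers disappear from the .proto and the Go type, while the patchStrategy:"merge" patchMergeKey:"type" struct tags on Conditions remain. Those tags are ordinary Go struct tags; here is a small sketch of reading them with reflect (the Status type below is invented, not the Kubernetes one).

package main

import (
	"fmt"
	"reflect"
)

type Condition struct{ Type, Status string }

type Status struct {
	// Conditions is merged by strategic-merge-patch, keyed on "type".
	Conditions []Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"`
}

func main() {
	f, _ := reflect.TypeOf(Status{}).FieldByName("Conditions")
	fmt.Println("patchStrategy:", f.Tag.Get("patchStrategy")) // merge
	fmt.Println("patchMergeKey:", f.Tag.Get("patchMergeKey")) // type
}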
@@ -50,7 +50,9 @@ var _ = math.Inf

 // This is a compile-time assertion to ensure that this generated file
 // is compatible with the proto package it is being compiled against.
-const _ = proto.GoGoProtoPackageIsVersion1
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package

 func (m *ExtraValue) Reset() { *m = ExtraValue{} }
 func (*ExtraValue) ProtoMessage() {}
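The generated file swaps its guard from GoGoProtoPackageIsVersion1 to GoGoProtoPackageIsVersion2. The guard is a plain compile-time assertion: referencing a constant that only newer versions of the dependency export makes an out-of-date vendor tree fail at build time instead of at runtime. A minimal sketch of the idiom with a local stand-in constant:

package main

// packageIsVersion2 stands in for a constant exported by the dependency; if
// the vendored copy were too old to define it, this file would not compile.
const packageIsVersion2 = true

// The blank constant exists only to force the reference above to be resolved
// at build time.
const _ = packageIsVersion2

func main() {}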
@@ -79,74 +81,74 @@ func init() {
 	proto.RegisterType((*TokenReviewStatus)(nil), "k8s.io.client-go.pkg.apis.authentication.v1.TokenReviewStatus")
 	proto.RegisterType((*UserInfo)(nil), "k8s.io.client-go.pkg.apis.authentication.v1.UserInfo")
 }
-func (m ExtraValue) Marshal() (data []byte, err error) {
+func (m ExtraValue) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
-	data = make([]byte, size)
-	n, err := m.MarshalTo(data)
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
 	if err != nil {
 		return nil, err
 	}
-	return data[:n], nil
+	return dAtA[:n], nil
 }

-func (m ExtraValue) MarshalTo(data []byte) (int, error) {
+func (m ExtraValue) MarshalTo(dAtA []byte) (int, error) {
 	var i int
 	_ = i
 	var l int
 	_ = l
 	if len(m) > 0 {
 		for _, s := range m {
-			data[i] = 0xa
+			dAtA[i] = 0xa
 			i++
 			l = len(s)
 			for l >= 1<<7 {
-				data[i] = uint8(uint64(l)&0x7f | 0x80)
+				dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
 				l >>= 7
 				i++
 			}
-			data[i] = uint8(l)
+			dAtA[i] = uint8(l)
 			i++
-			i += copy(data[i:], s)
+			i += copy(dAtA[i:], s)
 		}
 	}
 	return i, nil
 }

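Most of the churn in these generated marshalers is a mechanical rename of the buffer parameter from data to dAtA. The deliberately odd capitalization appears to exist so the parameter can never collide with identifiers derived from message fields (a field literally named data, for example); that reading is an inference, not something this diff states. A sketch of the shadowing concern the rename sidesteps, with an invented message type:

package main

import "fmt"

// Example stands in for a generated message that happens to have a Data field.
type Example struct{ Data string }

// MarshalTo writes the field into the caller-supplied buffer. Because the
// buffer is called dAtA, the field-derived local below cannot shadow it.
func (m *Example) MarshalTo(dAtA []byte) (int, error) {
	data := m.Data // local named after the field; harmless next to dAtA
	return copy(dAtA, data), nil
}

func main() {
	buf := make([]byte, 16)
	n, _ := (&Example{Data: "hello"}).MarshalTo(buf)
	fmt.Println(string(buf[:n])) // hello
}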
func (m *TokenReview) Marshal() (data []byte, err error) {
|
func (m *TokenReview) Marshal() (dAtA []byte, err error) {
|
||||||
size := m.Size()
|
size := m.Size()
|
||||||
data = make([]byte, size)
|
dAtA = make([]byte, size)
|
||||||
n, err := m.MarshalTo(data)
|
n, err := m.MarshalTo(dAtA)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
return data[:n], nil
|
return dAtA[:n], nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *TokenReview) MarshalTo(data []byte) (int, error) {
|
func (m *TokenReview) MarshalTo(dAtA []byte) (int, error) {
|
||||||
var i int
|
var i int
|
||||||
_ = i
|
_ = i
|
||||||
var l int
|
var l int
|
||||||
_ = l
|
_ = l
|
||||||
data[i] = 0xa
|
dAtA[i] = 0xa
|
||||||
i++
|
i++
|
||||||
i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size()))
|
i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size()))
|
||||||
n1, err := m.ObjectMeta.MarshalTo(data[i:])
|
n1, err := m.ObjectMeta.MarshalTo(dAtA[i:])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
i += n1
|
i += n1
|
||||||
data[i] = 0x12
|
dAtA[i] = 0x12
|
||||||
i++
|
i++
|
||||||
i = encodeVarintGenerated(data, i, uint64(m.Spec.Size()))
|
i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size()))
|
||||||
n2, err := m.Spec.MarshalTo(data[i:])
|
n2, err := m.Spec.MarshalTo(dAtA[i:])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
i += n2
|
i += n2
|
||||||
data[i] = 0x1a
|
dAtA[i] = 0x1a
|
||||||
i++
|
i++
|
||||||
i = encodeVarintGenerated(data, i, uint64(m.Status.Size()))
|
i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size()))
|
||||||
n3, err := m.Status.MarshalTo(data[i:])
|
n3, err := m.Status.MarshalTo(dAtA[i:])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
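The TokenReview marshaler above follows the standard protobuf wire framing for embedded messages: write the field's tag byte (0xa, 0x12, 0x1a for fields 1-3), then a varint length, then the payload. Here is a self-contained sketch of that framing for a single length-delimited field, using invented helper names rather than the generated ones.

package main

import "fmt"

// appendVarint appends x in protobuf varint form: 7 bits per byte, high bit
// set on every byte except the last.
func appendVarint(buf []byte, x uint64) []byte {
	for x >= 1<<7 {
		buf = append(buf, byte(x&0x7f|0x80))
		x >>= 7
	}
	return append(buf, byte(x))
}

// appendLengthDelimited frames a payload as protobuf field number `field`
// with wire type 2: tag byte, varint length, then the raw bytes.
func appendLengthDelimited(buf []byte, field int, payload []byte) []byte {
	buf = append(buf, byte(field<<3|2))
	buf = appendVarint(buf, uint64(len(payload)))
	return append(buf, payload...)
}

func main() {
	frame := appendLengthDelimited(nil, 2, []byte("my-token"))
	fmt.Printf("% x\n", frame) // 12 08 6d 79 2d 74 6f 6b 65 6e
}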
@@ -154,120 +156,124 @@ func (m *TokenReview) MarshalTo(data []byte) (int, error) {
|
|||||||
return i, nil
|
return i, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *TokenReviewSpec) Marshal() (data []byte, err error) {
|
func (m *TokenReviewSpec) Marshal() (dAtA []byte, err error) {
|
||||||
size := m.Size()
|
size := m.Size()
|
||||||
data = make([]byte, size)
|
dAtA = make([]byte, size)
|
||||||
n, err := m.MarshalTo(data)
|
n, err := m.MarshalTo(dAtA)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
return data[:n], nil
|
return dAtA[:n], nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *TokenReviewSpec) MarshalTo(data []byte) (int, error) {
|
func (m *TokenReviewSpec) MarshalTo(dAtA []byte) (int, error) {
|
||||||
var i int
|
var i int
|
||||||
_ = i
|
_ = i
|
||||||
var l int
|
var l int
|
||||||
_ = l
|
_ = l
|
||||||
data[i] = 0xa
|
dAtA[i] = 0xa
|
||||||
i++
|
i++
|
||||||
i = encodeVarintGenerated(data, i, uint64(len(m.Token)))
|
i = encodeVarintGenerated(dAtA, i, uint64(len(m.Token)))
|
||||||
i += copy(data[i:], m.Token)
|
i += copy(dAtA[i:], m.Token)
|
||||||
return i, nil
|
return i, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *TokenReviewStatus) Marshal() (data []byte, err error) {
|
func (m *TokenReviewStatus) Marshal() (dAtA []byte, err error) {
|
||||||
size := m.Size()
|
size := m.Size()
|
||||||
data = make([]byte, size)
|
dAtA = make([]byte, size)
|
||||||
n, err := m.MarshalTo(data)
|
n, err := m.MarshalTo(dAtA)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
return data[:n], nil
|
return dAtA[:n], nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *TokenReviewStatus) MarshalTo(data []byte) (int, error) {
|
func (m *TokenReviewStatus) MarshalTo(dAtA []byte) (int, error) {
|
||||||
var i int
|
var i int
|
||||||
_ = i
|
_ = i
|
||||||
var l int
|
var l int
|
||||||
_ = l
|
_ = l
|
||||||
data[i] = 0x8
|
dAtA[i] = 0x8
|
||||||
i++
|
i++
|
||||||
if m.Authenticated {
|
if m.Authenticated {
|
||||||
data[i] = 1
|
dAtA[i] = 1
|
||||||
} else {
|
} else {
|
||||||
data[i] = 0
|
dAtA[i] = 0
|
||||||
}
|
}
|
||||||
i++
|
i++
|
||||||
data[i] = 0x12
|
dAtA[i] = 0x12
|
||||||
i++
|
i++
|
||||||
i = encodeVarintGenerated(data, i, uint64(m.User.Size()))
|
i = encodeVarintGenerated(dAtA, i, uint64(m.User.Size()))
|
||||||
n4, err := m.User.MarshalTo(data[i:])
|
n4, err := m.User.MarshalTo(dAtA[i:])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
i += n4
|
i += n4
|
||||||
data[i] = 0x1a
|
dAtA[i] = 0x1a
|
||||||
i++
|
i++
|
||||||
i = encodeVarintGenerated(data, i, uint64(len(m.Error)))
|
i = encodeVarintGenerated(dAtA, i, uint64(len(m.Error)))
|
||||||
i += copy(data[i:], m.Error)
|
i += copy(dAtA[i:], m.Error)
|
||||||
return i, nil
|
return i, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *UserInfo) Marshal() (data []byte, err error) {
|
func (m *UserInfo) Marshal() (dAtA []byte, err error) {
|
||||||
size := m.Size()
|
size := m.Size()
|
||||||
data = make([]byte, size)
|
dAtA = make([]byte, size)
|
||||||
n, err := m.MarshalTo(data)
|
n, err := m.MarshalTo(dAtA)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
return data[:n], nil
|
return dAtA[:n], nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *UserInfo) MarshalTo(data []byte) (int, error) {
|
func (m *UserInfo) MarshalTo(dAtA []byte) (int, error) {
|
||||||
var i int
|
var i int
|
||||||
_ = i
|
_ = i
|
||||||
var l int
|
var l int
|
||||||
_ = l
|
_ = l
|
||||||
data[i] = 0xa
|
dAtA[i] = 0xa
|
||||||
i++
|
i++
|
||||||
i = encodeVarintGenerated(data, i, uint64(len(m.Username)))
|
i = encodeVarintGenerated(dAtA, i, uint64(len(m.Username)))
|
||||||
i += copy(data[i:], m.Username)
|
i += copy(dAtA[i:], m.Username)
|
||||||
data[i] = 0x12
|
dAtA[i] = 0x12
|
||||||
i++
|
i++
|
||||||
i = encodeVarintGenerated(data, i, uint64(len(m.UID)))
|
i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID)))
|
||||||
i += copy(data[i:], m.UID)
|
i += copy(dAtA[i:], m.UID)
|
||||||
if len(m.Groups) > 0 {
|
if len(m.Groups) > 0 {
|
||||||
for _, s := range m.Groups {
|
for _, s := range m.Groups {
|
||||||
data[i] = 0x1a
|
dAtA[i] = 0x1a
|
||||||
i++
|
i++
|
||||||
l = len(s)
|
l = len(s)
|
||||||
for l >= 1<<7 {
|
for l >= 1<<7 {
|
||||||
data[i] = uint8(uint64(l)&0x7f | 0x80)
|
dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
|
||||||
l >>= 7
|
l >>= 7
|
||||||
i++
|
i++
|
||||||
}
|
}
|
||||||
data[i] = uint8(l)
|
dAtA[i] = uint8(l)
|
||||||
i++
|
i++
|
||||||
i += copy(data[i:], s)
|
i += copy(dAtA[i:], s)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
 	if len(m.Extra) > 0 {
 		for k := range m.Extra {
-			data[i] = 0x22
+			dAtA[i] = 0x22
 			i++
 			v := m.Extra[k]
-			msgSize := (&v).Size()
-			mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize))
-			i = encodeVarintGenerated(data, i, uint64(mapSize))
-			data[i] = 0xa
+			msgSize := 0
+			if (&v) != nil {
+				msgSize = (&v).Size()
+				msgSize += 1 + sovGenerated(uint64(msgSize))
+			}
+			mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize
+			i = encodeVarintGenerated(dAtA, i, uint64(mapSize))
+			dAtA[i] = 0xa
 			i++
-			i = encodeVarintGenerated(data, i, uint64(len(k)))
-			i += copy(data[i:], k)
-			data[i] = 0x12
+			i = encodeVarintGenerated(dAtA, i, uint64(len(k)))
+			i += copy(dAtA[i:], k)
+			dAtA[i] = 0x12
 			i++
-			i = encodeVarintGenerated(data, i, uint64((&v).Size()))
-			n5, err := (&v).MarshalTo(data[i:])
+			i = encodeVarintGenerated(dAtA, i, uint64((&v).Size()))
+			n5, err := (&v).MarshalTo(dAtA[i:])
 			if err != nil {
 				return 0, err
 			}
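The rewritten block above sizes each Extra map entry differently: the value's contribution is computed only when a value is actually present, so the entry no longer unconditionally adds a value tag byte and length prefix. A small sketch of the new formula, using a local sov helper with the same meaning as sovGenerated (varint width in bytes); the numbers are illustrative only.

package main

import "fmt"

// sov plays the role of sovGenerated: the number of bytes needed to encode x
// as a protobuf varint.
func sov(x uint64) (n int) {
	for {
		n++
		x >>= 7
		if x == 0 {
			return n
		}
	}
}

// entrySize follows the rewritten generated code: the key always contributes
// a tag byte, a length varint and its bytes; the value contributes its own
// tag byte and length varint only when it is present.
func entrySize(key string, valueSize int, valuePresent bool) int {
	msgSize := 0
	if valuePresent {
		msgSize = valueSize
		msgSize += 1 + sov(uint64(msgSize))
	}
	return 1 + len(key) + sov(uint64(len(key))) + msgSize
}

func main() {
	fmt.Println(entrySize("scopes", 12, true))  // 22: key frame + value frame
	fmt.Println(entrySize("scopes", 0, false)) // 8: key frame only
}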
@@ -277,31 +283,31 @@ func (m *UserInfo) MarshalTo(data []byte) (int, error) {
|
|||||||
return i, nil
|
return i, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func encodeFixed64Generated(data []byte, offset int, v uint64) int {
|
func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int {
|
||||||
data[offset] = uint8(v)
|
dAtA[offset] = uint8(v)
|
||||||
data[offset+1] = uint8(v >> 8)
|
dAtA[offset+1] = uint8(v >> 8)
|
||||||
data[offset+2] = uint8(v >> 16)
|
dAtA[offset+2] = uint8(v >> 16)
|
||||||
data[offset+3] = uint8(v >> 24)
|
dAtA[offset+3] = uint8(v >> 24)
|
||||||
data[offset+4] = uint8(v >> 32)
|
dAtA[offset+4] = uint8(v >> 32)
|
||||||
data[offset+5] = uint8(v >> 40)
|
dAtA[offset+5] = uint8(v >> 40)
|
||||||
data[offset+6] = uint8(v >> 48)
|
dAtA[offset+6] = uint8(v >> 48)
|
||||||
data[offset+7] = uint8(v >> 56)
|
dAtA[offset+7] = uint8(v >> 56)
|
||||||
return offset + 8
|
return offset + 8
|
||||||
}
|
}
|
||||||
func encodeFixed32Generated(data []byte, offset int, v uint32) int {
|
func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int {
|
||||||
data[offset] = uint8(v)
|
dAtA[offset] = uint8(v)
|
||||||
data[offset+1] = uint8(v >> 8)
|
dAtA[offset+1] = uint8(v >> 8)
|
||||||
data[offset+2] = uint8(v >> 16)
|
dAtA[offset+2] = uint8(v >> 16)
|
||||||
data[offset+3] = uint8(v >> 24)
|
dAtA[offset+3] = uint8(v >> 24)
|
||||||
return offset + 4
|
return offset + 4
|
||||||
}
|
}
|
||||||
func encodeVarintGenerated(data []byte, offset int, v uint64) int {
|
func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
|
||||||
for v >= 1<<7 {
|
for v >= 1<<7 {
|
||||||
data[offset] = uint8(v&0x7f | 0x80)
|
dAtA[offset] = uint8(v&0x7f | 0x80)
|
||||||
v >>= 7
|
v >>= 7
|
||||||
offset++
|
offset++
|
||||||
}
|
}
|
||||||
data[offset] = uint8(v)
|
dAtA[offset] = uint8(v)
|
||||||
return offset + 1
|
return offset + 1
|
||||||
}
|
}
|
||||||
func (m ExtraValue) Size() (n int) {
|
func (m ExtraValue) Size() (n int) {
|
||||||
@@ -450,8 +456,8 @@ func valueToStringGenerated(v interface{}) string {
|
|||||||
pv := reflect.Indirect(rv).Interface()
|
pv := reflect.Indirect(rv).Interface()
|
||||||
return fmt.Sprintf("*%v", pv)
|
return fmt.Sprintf("*%v", pv)
|
||||||
}
|
}
|
||||||
func (m *ExtraValue) Unmarshal(data []byte) error {
|
func (m *ExtraValue) Unmarshal(dAtA []byte) error {
|
||||||
l := len(data)
|
l := len(dAtA)
|
||||||
iNdEx := 0
|
iNdEx := 0
|
||||||
for iNdEx < l {
|
for iNdEx < l {
|
||||||
preIndex := iNdEx
|
preIndex := iNdEx
|
||||||
@@ -463,7 +469,7 @@ func (m *ExtraValue) Unmarshal(data []byte) error {
|
|||||||
if iNdEx >= l {
|
if iNdEx >= l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
b := data[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
wire |= (uint64(b) & 0x7F) << shift
|
wire |= (uint64(b) & 0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
@@ -491,7 +497,7 @@ func (m *ExtraValue) Unmarshal(data []byte) error {
|
|||||||
if iNdEx >= l {
|
if iNdEx >= l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
b := data[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
stringLen |= (uint64(b) & 0x7F) << shift
|
stringLen |= (uint64(b) & 0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
@@ -506,11 +512,11 @@ func (m *ExtraValue) Unmarshal(data []byte) error {
|
|||||||
if postIndex > l {
|
if postIndex > l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
*m = append(*m, string(data[iNdEx:postIndex]))
|
*m = append(*m, string(dAtA[iNdEx:postIndex]))
|
||||||
iNdEx = postIndex
|
iNdEx = postIndex
|
||||||
default:
|
default:
|
||||||
iNdEx = preIndex
|
iNdEx = preIndex
|
||||||
skippy, err := skipGenerated(data[iNdEx:])
|
skippy, err := skipGenerated(dAtA[iNdEx:])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -529,8 +535,8 @@ func (m *ExtraValue) Unmarshal(data []byte) error {
|
|||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
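ExtraValue.Unmarshal, like every decoder in this file, reads wire tags and lengths with the same inlined loop: accumulate 7 bits per byte until a byte with the high bit clear terminates the varint. A standalone sketch of that loop with basic bounds and overflow checks; the error values are invented stand-ins for the generated ones.

package main

import (
	"errors"
	"fmt"
)

var (
	errUnexpectedEOF = errors.New("unexpected EOF")            // stands in for io.ErrUnexpectedEOF
	errIntOverflow   = errors.New("varint overflows 64 bits")  // stands in for ErrIntOverflowGenerated
)

// readVarint decodes one varint starting at offset i and returns the value
// together with the offset of the first byte after it.
func readVarint(buf []byte, i int) (uint64, int, error) {
	var v uint64
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, i, errIntOverflow
		}
		if i >= len(buf) {
			return 0, i, errUnexpectedEOF
		}
		b := buf[i]
		i++
		v |= uint64(b&0x7F) << shift
		if b < 0x80 {
			return v, i, nil
		}
	}
}

func main() {
	v, next, err := readVarint([]byte{0xac, 0x02, 0xff}, 0) // 0xac 0x02 encodes 300
	fmt.Println(v, next, err)                               // 300 2 <nil>
}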
func (m *TokenReview) Unmarshal(data []byte) error {
|
func (m *TokenReview) Unmarshal(dAtA []byte) error {
|
||||||
l := len(data)
|
l := len(dAtA)
|
||||||
iNdEx := 0
|
iNdEx := 0
|
||||||
for iNdEx < l {
|
for iNdEx < l {
|
||||||
preIndex := iNdEx
|
preIndex := iNdEx
|
||||||
@@ -542,7 +548,7 @@ func (m *TokenReview) Unmarshal(data []byte) error {
|
|||||||
if iNdEx >= l {
|
if iNdEx >= l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
b := data[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
wire |= (uint64(b) & 0x7F) << shift
|
wire |= (uint64(b) & 0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
@@ -570,7 +576,7 @@ func (m *TokenReview) Unmarshal(data []byte) error {
|
|||||||
if iNdEx >= l {
|
if iNdEx >= l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
b := data[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
msglen |= (int(b) & 0x7F) << shift
|
msglen |= (int(b) & 0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
@@ -584,7 +590,7 @@ func (m *TokenReview) Unmarshal(data []byte) error {
|
|||||||
if postIndex > l {
|
if postIndex > l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil {
|
if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
iNdEx = postIndex
|
iNdEx = postIndex
|
||||||
@@ -600,7 +606,7 @@ func (m *TokenReview) Unmarshal(data []byte) error {
|
|||||||
if iNdEx >= l {
|
if iNdEx >= l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
b := data[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
msglen |= (int(b) & 0x7F) << shift
|
msglen |= (int(b) & 0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
@@ -614,7 +620,7 @@ func (m *TokenReview) Unmarshal(data []byte) error {
|
|||||||
if postIndex > l {
|
if postIndex > l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil {
|
if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
iNdEx = postIndex
|
iNdEx = postIndex
|
||||||
@@ -630,7 +636,7 @@ func (m *TokenReview) Unmarshal(data []byte) error {
|
|||||||
if iNdEx >= l {
|
if iNdEx >= l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
b := data[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
msglen |= (int(b) & 0x7F) << shift
|
msglen |= (int(b) & 0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
@@ -644,13 +650,13 @@ func (m *TokenReview) Unmarshal(data []byte) error {
|
|||||||
if postIndex > l {
|
if postIndex > l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil {
|
if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
iNdEx = postIndex
|
iNdEx = postIndex
|
||||||
default:
|
default:
|
||||||
iNdEx = preIndex
|
iNdEx = preIndex
|
||||||
skippy, err := skipGenerated(data[iNdEx:])
|
skippy, err := skipGenerated(dAtA[iNdEx:])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -669,8 +675,8 @@ func (m *TokenReview) Unmarshal(data []byte) error {
|
|||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
func (m *TokenReviewSpec) Unmarshal(data []byte) error {
|
func (m *TokenReviewSpec) Unmarshal(dAtA []byte) error {
|
||||||
l := len(data)
|
l := len(dAtA)
|
||||||
iNdEx := 0
|
iNdEx := 0
|
||||||
for iNdEx < l {
|
for iNdEx < l {
|
||||||
preIndex := iNdEx
|
preIndex := iNdEx
|
||||||
@@ -682,7 +688,7 @@ func (m *TokenReviewSpec) Unmarshal(data []byte) error {
|
|||||||
if iNdEx >= l {
|
if iNdEx >= l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
b := data[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
wire |= (uint64(b) & 0x7F) << shift
|
wire |= (uint64(b) & 0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
@@ -710,7 +716,7 @@ func (m *TokenReviewSpec) Unmarshal(data []byte) error {
|
|||||||
if iNdEx >= l {
|
if iNdEx >= l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
b := data[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
stringLen |= (uint64(b) & 0x7F) << shift
|
stringLen |= (uint64(b) & 0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
@@ -725,11 +731,11 @@ func (m *TokenReviewSpec) Unmarshal(data []byte) error {
|
|||||||
if postIndex > l {
|
if postIndex > l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
m.Token = string(data[iNdEx:postIndex])
|
m.Token = string(dAtA[iNdEx:postIndex])
|
||||||
iNdEx = postIndex
|
iNdEx = postIndex
|
||||||
default:
|
default:
|
||||||
iNdEx = preIndex
|
iNdEx = preIndex
|
||||||
skippy, err := skipGenerated(data[iNdEx:])
|
skippy, err := skipGenerated(dAtA[iNdEx:])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -748,8 +754,8 @@ func (m *TokenReviewSpec) Unmarshal(data []byte) error {
|
|||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
func (m *TokenReviewStatus) Unmarshal(data []byte) error {
|
func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error {
|
||||||
l := len(data)
|
l := len(dAtA)
|
||||||
iNdEx := 0
|
iNdEx := 0
|
||||||
for iNdEx < l {
|
for iNdEx < l {
|
||||||
preIndex := iNdEx
|
preIndex := iNdEx
|
||||||
@@ -761,7 +767,7 @@ func (m *TokenReviewStatus) Unmarshal(data []byte) error {
|
|||||||
if iNdEx >= l {
|
if iNdEx >= l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
b := data[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
wire |= (uint64(b) & 0x7F) << shift
|
wire |= (uint64(b) & 0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
@@ -789,7 +795,7 @@ func (m *TokenReviewStatus) Unmarshal(data []byte) error {
|
|||||||
if iNdEx >= l {
|
if iNdEx >= l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
b := data[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
v |= (int(b) & 0x7F) << shift
|
v |= (int(b) & 0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
@@ -809,7 +815,7 @@ func (m *TokenReviewStatus) Unmarshal(data []byte) error {
|
|||||||
if iNdEx >= l {
|
if iNdEx >= l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
b := data[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
msglen |= (int(b) & 0x7F) << shift
|
msglen |= (int(b) & 0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
@@ -823,7 +829,7 @@ func (m *TokenReviewStatus) Unmarshal(data []byte) error {
|
|||||||
if postIndex > l {
|
if postIndex > l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
if err := m.User.Unmarshal(data[iNdEx:postIndex]); err != nil {
|
if err := m.User.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
iNdEx = postIndex
|
iNdEx = postIndex
|
||||||
@@ -839,7 +845,7 @@ func (m *TokenReviewStatus) Unmarshal(data []byte) error {
|
|||||||
if iNdEx >= l {
|
if iNdEx >= l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
b := data[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
stringLen |= (uint64(b) & 0x7F) << shift
|
stringLen |= (uint64(b) & 0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
@@ -854,11 +860,11 @@ func (m *TokenReviewStatus) Unmarshal(data []byte) error {
|
|||||||
if postIndex > l {
|
if postIndex > l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
m.Error = string(data[iNdEx:postIndex])
|
m.Error = string(dAtA[iNdEx:postIndex])
|
||||||
iNdEx = postIndex
|
iNdEx = postIndex
|
||||||
default:
|
default:
|
||||||
iNdEx = preIndex
|
iNdEx = preIndex
|
||||||
skippy, err := skipGenerated(data[iNdEx:])
|
skippy, err := skipGenerated(dAtA[iNdEx:])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -877,8 +883,8 @@ func (m *TokenReviewStatus) Unmarshal(data []byte) error {
|
|||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
func (m *UserInfo) Unmarshal(data []byte) error {
|
func (m *UserInfo) Unmarshal(dAtA []byte) error {
|
||||||
l := len(data)
|
l := len(dAtA)
|
||||||
iNdEx := 0
|
iNdEx := 0
|
||||||
for iNdEx < l {
|
for iNdEx < l {
|
||||||
preIndex := iNdEx
|
preIndex := iNdEx
|
||||||
@@ -890,7 +896,7 @@ func (m *UserInfo) Unmarshal(data []byte) error {
|
|||||||
if iNdEx >= l {
|
if iNdEx >= l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
b := data[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
wire |= (uint64(b) & 0x7F) << shift
|
wire |= (uint64(b) & 0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
@@ -918,7 +924,7 @@ func (m *UserInfo) Unmarshal(data []byte) error {
|
|||||||
if iNdEx >= l {
|
if iNdEx >= l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
b := data[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
stringLen |= (uint64(b) & 0x7F) << shift
|
stringLen |= (uint64(b) & 0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
@@ -933,7 +939,7 @@ func (m *UserInfo) Unmarshal(data []byte) error {
|
|||||||
if postIndex > l {
|
if postIndex > l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
m.Username = string(data[iNdEx:postIndex])
|
m.Username = string(dAtA[iNdEx:postIndex])
|
||||||
iNdEx = postIndex
|
iNdEx = postIndex
|
||||||
case 2:
|
case 2:
|
||||||
if wireType != 2 {
|
if wireType != 2 {
|
||||||
@@ -947,7 +953,7 @@ func (m *UserInfo) Unmarshal(data []byte) error {
|
|||||||
if iNdEx >= l {
|
if iNdEx >= l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
b := data[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
stringLen |= (uint64(b) & 0x7F) << shift
|
stringLen |= (uint64(b) & 0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
@@ -962,7 +968,7 @@ func (m *UserInfo) Unmarshal(data []byte) error {
|
|||||||
if postIndex > l {
|
if postIndex > l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
m.UID = string(data[iNdEx:postIndex])
|
m.UID = string(dAtA[iNdEx:postIndex])
|
||||||
iNdEx = postIndex
|
iNdEx = postIndex
|
||||||
case 3:
|
case 3:
|
||||||
if wireType != 2 {
|
if wireType != 2 {
|
||||||
@@ -976,7 +982,7 @@ func (m *UserInfo) Unmarshal(data []byte) error {
|
|||||||
if iNdEx >= l {
|
if iNdEx >= l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
b := data[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
stringLen |= (uint64(b) & 0x7F) << shift
|
stringLen |= (uint64(b) & 0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
@@ -991,7 +997,7 @@ func (m *UserInfo) Unmarshal(data []byte) error {
|
|||||||
if postIndex > l {
|
if postIndex > l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
m.Groups = append(m.Groups, string(data[iNdEx:postIndex]))
|
m.Groups = append(m.Groups, string(dAtA[iNdEx:postIndex]))
|
||||||
iNdEx = postIndex
|
iNdEx = postIndex
|
||||||
case 4:
|
case 4:
|
||||||
if wireType != 2 {
|
if wireType != 2 {
|
||||||
@@ -1005,7 +1011,7 @@ func (m *UserInfo) Unmarshal(data []byte) error {
|
|||||||
if iNdEx >= l {
|
if iNdEx >= l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
b := data[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
msglen |= (int(b) & 0x7F) << shift
|
msglen |= (int(b) & 0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
@@ -1027,7 +1033,7 @@ func (m *UserInfo) Unmarshal(data []byte) error {
|
|||||||
if iNdEx >= l {
|
if iNdEx >= l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
b := data[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
keykey |= (uint64(b) & 0x7F) << shift
|
keykey |= (uint64(b) & 0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
@@ -1042,7 +1048,7 @@ func (m *UserInfo) Unmarshal(data []byte) error {
|
|||||||
if iNdEx >= l {
|
if iNdEx >= l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
b := data[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
stringLenmapkey |= (uint64(b) & 0x7F) << shift
|
stringLenmapkey |= (uint64(b) & 0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
@@ -1057,61 +1063,66 @@ func (m *UserInfo) Unmarshal(data []byte) error {
 			if postStringIndexmapkey > l {
 				return io.ErrUnexpectedEOF
 			}
-			mapkey := string(data[iNdEx:postStringIndexmapkey])
+			mapkey := string(dAtA[iNdEx:postStringIndexmapkey])
 			iNdEx = postStringIndexmapkey
-			var valuekey uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowGenerated
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := data[iNdEx]
-				iNdEx++
-				valuekey |= (uint64(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			var mapmsglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowGenerated
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := data[iNdEx]
-				iNdEx++
-				mapmsglen |= (int(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if mapmsglen < 0 {
-				return ErrInvalidLengthGenerated
-			}
-			postmsgIndex := iNdEx + mapmsglen
-			if mapmsglen < 0 {
-				return ErrInvalidLengthGenerated
-			}
-			if postmsgIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			mapvalue := &ExtraValue{}
-			if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil {
-				return err
-			}
-			iNdEx = postmsgIndex
 			if m.Extra == nil {
 				m.Extra = make(map[string]ExtraValue)
 			}
-			m.Extra[mapkey] = *mapvalue
+			if iNdEx < postIndex {
+				var valuekey uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowGenerated
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					valuekey |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				var mapmsglen int
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowGenerated
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					mapmsglen |= (int(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				if mapmsglen < 0 {
+					return ErrInvalidLengthGenerated
+				}
+				postmsgIndex := iNdEx + mapmsglen
+				if mapmsglen < 0 {
+					return ErrInvalidLengthGenerated
+				}
+				if postmsgIndex > l {
+					return io.ErrUnexpectedEOF
+				}
+				mapvalue := &ExtraValue{}
+				if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
+					return err
+				}
+				iNdEx = postmsgIndex
+				m.Extra[mapkey] = *mapvalue
+			} else {
+				var mapvalue ExtraValue
+				m.Extra[mapkey] = mapvalue
+			}
 			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
-			skippy, err := skipGenerated(data[iNdEx:])
+			skippy, err := skipGenerated(dAtA[iNdEx:])
 			if err != nil {
 				return err
 			}
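The rewritten map-decoding block parses a value for Extra[mapkey] only when bytes remain inside the entry (iNdEx < postIndex); an entry that carries just a key now yields the zero value instead of the decoder reading past the entry. A compact sketch of that guard, with invented types in place of the generated ones:

package main

import "fmt"

// ExtraValue mirrors the repeated-string value type used by the Extra map.
type ExtraValue []string

// storeEntry installs a decoded map entry: if the entry still has value bytes
// a real value is decoded (here faked by the decode callback), otherwise the
// key is kept with the zero value, as in the new generated code.
func storeEntry(extra map[string]ExtraValue, key string, entry []byte, idx int, decode func([]byte) ExtraValue) {
	if idx < len(entry) {
		extra[key] = decode(entry[idx:])
		return
	}
	var zero ExtraValue
	extra[key] = zero
}

func main() {
	extra := map[string]ExtraValue{}
	decode := func(b []byte) ExtraValue { return ExtraValue{string(b)} }
	storeEntry(extra, "scopes", []byte("admin"), 0, decode) // value bytes present
	storeEntry(extra, "empty", nil, 0, decode)              // key-only entry
	fmt.Println(extra) // map[empty:[] scopes:[admin]]
}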
@@ -1130,8 +1141,8 @@ func (m *UserInfo) Unmarshal(data []byte) error {
|
|||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
func skipGenerated(data []byte) (n int, err error) {
|
func skipGenerated(dAtA []byte) (n int, err error) {
|
||||||
l := len(data)
|
l := len(dAtA)
|
||||||
iNdEx := 0
|
iNdEx := 0
|
||||||
for iNdEx < l {
|
for iNdEx < l {
|
||||||
var wire uint64
|
var wire uint64
|
||||||
@@ -1142,7 +1153,7 @@ func skipGenerated(data []byte) (n int, err error) {
|
|||||||
if iNdEx >= l {
|
if iNdEx >= l {
|
||||||
return 0, io.ErrUnexpectedEOF
|
return 0, io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
b := data[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
wire |= (uint64(b) & 0x7F) << shift
|
wire |= (uint64(b) & 0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
@@ -1160,7 +1171,7 @@ func skipGenerated(data []byte) (n int, err error) {
|
|||||||
return 0, io.ErrUnexpectedEOF
|
return 0, io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
iNdEx++
|
iNdEx++
|
||||||
if data[iNdEx-1] < 0x80 {
|
if dAtA[iNdEx-1] < 0x80 {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -1177,7 +1188,7 @@ func skipGenerated(data []byte) (n int, err error) {
|
|||||||
if iNdEx >= l {
|
if iNdEx >= l {
|
||||||
return 0, io.ErrUnexpectedEOF
|
return 0, io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
b := data[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
length |= (int(b) & 0x7F) << shift
|
length |= (int(b) & 0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
@@ -1200,7 +1211,7 @@ func skipGenerated(data []byte) (n int, err error) {
|
|||||||
if iNdEx >= l {
|
if iNdEx >= l {
|
||||||
return 0, io.ErrUnexpectedEOF
|
return 0, io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
b := data[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
innerWire |= (uint64(b) & 0x7F) << shift
|
innerWire |= (uint64(b) & 0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
@@ -1211,7 +1222,7 @@ func skipGenerated(data []byte) (n int, err error) {
|
|||||||
if innerWireType == 4 {
|
if innerWireType == 4 {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
next, err := skipGenerated(data[start:])
|
next, err := skipGenerated(dAtA[start:])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
@@ -1235,6 +1246,10 @@ var (
 	ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow")
 )

+func init() {
+	proto.RegisterFile("k8s.io/client-go/pkg/apis/authentication/v1/generated.proto", fileDescriptorGenerated)
+}
+
 var fileDescriptorGenerated = []byte{
 	// 655 bytes of a gzipped FileDescriptorProto
 	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x9c, 0x53, 0xcd, 0x6e, 0xd3, 0x4c,

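The added init function registers the compressed file descriptor with the proto package at program start, the same registration-at-init pattern used for the message types earlier in the file. A stripped-down sketch of that pattern with a local registry (the names below are invented, not the proto API):

package main

import "fmt"

// fileRegistry stands in for the package-level table that proto.RegisterFile
// populates: proto path -> raw descriptor bytes.
var fileRegistry = map[string][]byte{}

var fileDescriptorExample = []byte{0x1f, 0x8b} // placeholder descriptor bytes

// init runs before main, so every imported package has registered its
// descriptors by the time application code looks them up.
func init() {
	fileRegistry["example/generated.proto"] = fileDescriptorExample
}

func main() {
	fmt.Println(len(fileRegistry), "descriptor(s) registered")
}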
@@ -50,7 +50,9 @@ var _ = math.Inf

 // This is a compile-time assertion to ensure that this generated file
 // is compatible with the proto package it is being compiled against.
-const _ = proto.GoGoProtoPackageIsVersion1
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package

 func (m *ExtraValue) Reset() { *m = ExtraValue{} }
 func (*ExtraValue) ProtoMessage() {}
@@ -79,74 +81,74 @@ func init() {
 	proto.RegisterType((*TokenReviewStatus)(nil), "k8s.io.client-go.pkg.apis.authentication.v1beta1.TokenReviewStatus")
 	proto.RegisterType((*UserInfo)(nil), "k8s.io.client-go.pkg.apis.authentication.v1beta1.UserInfo")
 }
func (m ExtraValue) Marshal() (data []byte, err error) {
|
func (m ExtraValue) Marshal() (dAtA []byte, err error) {
|
||||||
size := m.Size()
|
size := m.Size()
|
||||||
data = make([]byte, size)
|
dAtA = make([]byte, size)
|
||||||
n, err := m.MarshalTo(data)
|
n, err := m.MarshalTo(dAtA)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
return data[:n], nil
|
return dAtA[:n], nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m ExtraValue) MarshalTo(data []byte) (int, error) {
|
func (m ExtraValue) MarshalTo(dAtA []byte) (int, error) {
|
||||||
var i int
|
var i int
|
||||||
_ = i
|
_ = i
|
||||||
var l int
|
var l int
|
||||||
_ = l
|
_ = l
|
||||||
if len(m) > 0 {
|
if len(m) > 0 {
|
||||||
for _, s := range m {
|
for _, s := range m {
|
||||||
data[i] = 0xa
|
dAtA[i] = 0xa
|
||||||
i++
|
i++
|
||||||
l = len(s)
|
l = len(s)
|
||||||
for l >= 1<<7 {
|
for l >= 1<<7 {
|
||||||
data[i] = uint8(uint64(l)&0x7f | 0x80)
|
dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
|
||||||
l >>= 7
|
l >>= 7
|
||||||
i++
|
i++
|
||||||
}
|
}
|
||||||
data[i] = uint8(l)
|
dAtA[i] = uint8(l)
|
||||||
i++
|
i++
|
||||||
i += copy(data[i:], s)
|
i += copy(dAtA[i:], s)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return i, nil
|
return i, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *TokenReview) Marshal() (data []byte, err error) {
|
func (m *TokenReview) Marshal() (dAtA []byte, err error) {
|
||||||
size := m.Size()
|
size := m.Size()
|
||||||
data = make([]byte, size)
|
dAtA = make([]byte, size)
|
||||||
n, err := m.MarshalTo(data)
|
n, err := m.MarshalTo(dAtA)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
return data[:n], nil
|
return dAtA[:n], nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *TokenReview) MarshalTo(data []byte) (int, error) {
|
func (m *TokenReview) MarshalTo(dAtA []byte) (int, error) {
|
||||||
var i int
|
var i int
|
||||||
_ = i
|
_ = i
|
||||||
var l int
|
var l int
|
||||||
_ = l
|
_ = l
|
||||||
data[i] = 0xa
|
dAtA[i] = 0xa
|
||||||
i++
|
i++
|
||||||
i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size()))
|
i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size()))
|
||||||
n1, err := m.ObjectMeta.MarshalTo(data[i:])
|
n1, err := m.ObjectMeta.MarshalTo(dAtA[i:])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
i += n1
|
i += n1
|
||||||
data[i] = 0x12
|
dAtA[i] = 0x12
|
||||||
i++
|
i++
|
||||||
i = encodeVarintGenerated(data, i, uint64(m.Spec.Size()))
|
i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size()))
|
||||||
n2, err := m.Spec.MarshalTo(data[i:])
|
n2, err := m.Spec.MarshalTo(dAtA[i:])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
i += n2
|
i += n2
|
||||||
data[i] = 0x1a
|
dAtA[i] = 0x1a
|
||||||
i++
|
i++
|
||||||
i = encodeVarintGenerated(data, i, uint64(m.Status.Size()))
|
i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size()))
|
||||||
n3, err := m.Status.MarshalTo(data[i:])
|
n3, err := m.Status.MarshalTo(dAtA[i:])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
@@ -154,120 +156,124 @@ func (m *TokenReview) MarshalTo(data []byte) (int, error) {
|
|||||||
return i, nil
|
return i, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *TokenReviewSpec) Marshal() (data []byte, err error) {
|
func (m *TokenReviewSpec) Marshal() (dAtA []byte, err error) {
|
||||||
size := m.Size()
|
size := m.Size()
|
||||||
data = make([]byte, size)
|
dAtA = make([]byte, size)
|
||||||
n, err := m.MarshalTo(data)
|
n, err := m.MarshalTo(dAtA)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
return data[:n], nil
|
return dAtA[:n], nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *TokenReviewSpec) MarshalTo(data []byte) (int, error) {
|
func (m *TokenReviewSpec) MarshalTo(dAtA []byte) (int, error) {
|
||||||
var i int
|
var i int
|
||||||
_ = i
|
_ = i
|
||||||
var l int
|
var l int
|
||||||
_ = l
|
_ = l
|
||||||
data[i] = 0xa
|
dAtA[i] = 0xa
|
||||||
i++
|
i++
|
||||||
i = encodeVarintGenerated(data, i, uint64(len(m.Token)))
|
i = encodeVarintGenerated(dAtA, i, uint64(len(m.Token)))
|
||||||
i += copy(data[i:], m.Token)
|
i += copy(dAtA[i:], m.Token)
|
||||||
return i, nil
|
return i, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *TokenReviewStatus) Marshal() (data []byte, err error) {
|
func (m *TokenReviewStatus) Marshal() (dAtA []byte, err error) {
|
||||||
size := m.Size()
|
size := m.Size()
|
||||||
data = make([]byte, size)
|
dAtA = make([]byte, size)
|
||||||
n, err := m.MarshalTo(data)
|
n, err := m.MarshalTo(dAtA)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
return data[:n], nil
|
return dAtA[:n], nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *TokenReviewStatus) MarshalTo(data []byte) (int, error) {
|
func (m *TokenReviewStatus) MarshalTo(dAtA []byte) (int, error) {
|
||||||
var i int
|
var i int
|
||||||
_ = i
|
_ = i
|
||||||
var l int
|
var l int
|
||||||
_ = l
|
_ = l
|
||||||
data[i] = 0x8
|
dAtA[i] = 0x8
|
||||||
i++
|
i++
|
||||||
if m.Authenticated {
|
if m.Authenticated {
|
||||||
data[i] = 1
|
dAtA[i] = 1
|
||||||
} else {
|
} else {
|
||||||
data[i] = 0
|
dAtA[i] = 0
|
||||||
}
|
}
|
||||||
i++
|
i++
|
||||||
data[i] = 0x12
|
dAtA[i] = 0x12
|
||||||
i++
|
i++
|
||||||
i = encodeVarintGenerated(data, i, uint64(m.User.Size()))
|
i = encodeVarintGenerated(dAtA, i, uint64(m.User.Size()))
|
||||||
n4, err := m.User.MarshalTo(data[i:])
|
n4, err := m.User.MarshalTo(dAtA[i:])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
i += n4
|
i += n4
|
||||||
data[i] = 0x1a
|
dAtA[i] = 0x1a
|
||||||
i++
|
i++
|
||||||
i = encodeVarintGenerated(data, i, uint64(len(m.Error)))
|
i = encodeVarintGenerated(dAtA, i, uint64(len(m.Error)))
|
||||||
i += copy(data[i:], m.Error)
|
i += copy(dAtA[i:], m.Error)
|
||||||
return i, nil
|
return i, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *UserInfo) Marshal() (data []byte, err error) {
|
func (m *UserInfo) Marshal() (dAtA []byte, err error) {
|
||||||
size := m.Size()
|
size := m.Size()
|
||||||
data = make([]byte, size)
|
dAtA = make([]byte, size)
|
||||||
n, err := m.MarshalTo(data)
|
n, err := m.MarshalTo(dAtA)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
return data[:n], nil
|
return dAtA[:n], nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *UserInfo) MarshalTo(data []byte) (int, error) {
|
func (m *UserInfo) MarshalTo(dAtA []byte) (int, error) {
|
||||||
var i int
|
var i int
|
||||||
_ = i
|
_ = i
|
||||||
var l int
|
var l int
|
||||||
_ = l
|
_ = l
|
||||||
data[i] = 0xa
|
dAtA[i] = 0xa
|
||||||
i++
|
i++
|
||||||
i = encodeVarintGenerated(data, i, uint64(len(m.Username)))
|
i = encodeVarintGenerated(dAtA, i, uint64(len(m.Username)))
|
||||||
i += copy(data[i:], m.Username)
|
i += copy(dAtA[i:], m.Username)
|
||||||
data[i] = 0x12
|
dAtA[i] = 0x12
|
||||||
i++
|
i++
|
||||||
i = encodeVarintGenerated(data, i, uint64(len(m.UID)))
|
i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID)))
|
||||||
i += copy(data[i:], m.UID)
|
i += copy(dAtA[i:], m.UID)
|
||||||
if len(m.Groups) > 0 {
|
if len(m.Groups) > 0 {
|
||||||
for _, s := range m.Groups {
|
for _, s := range m.Groups {
|
||||||
data[i] = 0x1a
|
dAtA[i] = 0x1a
|
||||||
i++
|
i++
|
||||||
l = len(s)
|
l = len(s)
|
||||||
for l >= 1<<7 {
|
for l >= 1<<7 {
|
||||||
data[i] = uint8(uint64(l)&0x7f | 0x80)
|
dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
|
||||||
l >>= 7
|
l >>= 7
|
||||||
i++
|
i++
|
||||||
}
|
}
|
||||||
data[i] = uint8(l)
|
dAtA[i] = uint8(l)
|
||||||
i++
|
i++
|
||||||
i += copy(data[i:], s)
|
i += copy(dAtA[i:], s)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
 	if len(m.Extra) > 0 {
 		for k := range m.Extra {
-			data[i] = 0x22
+			dAtA[i] = 0x22
 			i++
 			v := m.Extra[k]
-			msgSize := (&v).Size()
-			mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize))
-			i = encodeVarintGenerated(data, i, uint64(mapSize))
-			data[i] = 0xa
+			msgSize := 0
+			if (&v) != nil {
+				msgSize = (&v).Size()
+				msgSize += 1 + sovGenerated(uint64(msgSize))
+			}
+			mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize
+			i = encodeVarintGenerated(dAtA, i, uint64(mapSize))
+			dAtA[i] = 0xa
 			i++
-			i = encodeVarintGenerated(data, i, uint64(len(k)))
-			i += copy(data[i:], k)
-			data[i] = 0x12
+			i = encodeVarintGenerated(dAtA, i, uint64(len(k)))
+			i += copy(dAtA[i:], k)
+			dAtA[i] = 0x12
 			i++
-			i = encodeVarintGenerated(data, i, uint64((&v).Size()))
-			n5, err := (&v).MarshalTo(data[i:])
+			i = encodeVarintGenerated(dAtA, i, uint64((&v).Size()))
+			n5, err := (&v).MarshalTo(dAtA[i:])
 			if err != nil {
 				return 0, err
 			}
@@ -277,31 +283,31 @@ func (m *UserInfo) MarshalTo(data []byte) (int, error) {
|
|||||||
return i, nil
|
return i, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func encodeFixed64Generated(data []byte, offset int, v uint64) int {
|
func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int {
|
||||||
data[offset] = uint8(v)
|
dAtA[offset] = uint8(v)
|
||||||
data[offset+1] = uint8(v >> 8)
|
dAtA[offset+1] = uint8(v >> 8)
|
||||||
data[offset+2] = uint8(v >> 16)
|
dAtA[offset+2] = uint8(v >> 16)
|
||||||
data[offset+3] = uint8(v >> 24)
|
dAtA[offset+3] = uint8(v >> 24)
|
||||||
data[offset+4] = uint8(v >> 32)
|
dAtA[offset+4] = uint8(v >> 32)
|
||||||
data[offset+5] = uint8(v >> 40)
|
dAtA[offset+5] = uint8(v >> 40)
|
||||||
data[offset+6] = uint8(v >> 48)
|
dAtA[offset+6] = uint8(v >> 48)
|
||||||
data[offset+7] = uint8(v >> 56)
|
dAtA[offset+7] = uint8(v >> 56)
|
||||||
return offset + 8
|
return offset + 8
|
||||||
}
|
}
|
||||||
func encodeFixed32Generated(data []byte, offset int, v uint32) int {
|
func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int {
|
||||||
data[offset] = uint8(v)
|
dAtA[offset] = uint8(v)
|
||||||
data[offset+1] = uint8(v >> 8)
|
dAtA[offset+1] = uint8(v >> 8)
|
||||||
data[offset+2] = uint8(v >> 16)
|
dAtA[offset+2] = uint8(v >> 16)
|
||||||
data[offset+3] = uint8(v >> 24)
|
dAtA[offset+3] = uint8(v >> 24)
|
||||||
return offset + 4
|
return offset + 4
|
||||||
}
|
}
|
||||||
func encodeVarintGenerated(data []byte, offset int, v uint64) int {
|
func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
|
||||||
for v >= 1<<7 {
|
for v >= 1<<7 {
|
||||||
data[offset] = uint8(v&0x7f | 0x80)
|
dAtA[offset] = uint8(v&0x7f | 0x80)
|
||||||
v >>= 7
|
v >>= 7
|
||||||
offset++
|
offset++
|
||||||
}
|
}
|
||||||
data[offset] = uint8(v)
|
dAtA[offset] = uint8(v)
|
||||||
return offset + 1
|
return offset + 1
|
||||||
}
|
}
|
||||||
func (m ExtraValue) Size() (n int) {
|
func (m ExtraValue) Size() (n int) {
|
||||||
@@ -450,8 +456,8 @@ func valueToStringGenerated(v interface{}) string {
|
|||||||
pv := reflect.Indirect(rv).Interface()
|
pv := reflect.Indirect(rv).Interface()
|
||||||
return fmt.Sprintf("*%v", pv)
|
return fmt.Sprintf("*%v", pv)
|
||||||
}
|
}
|
||||||
func (m *ExtraValue) Unmarshal(data []byte) error {
|
func (m *ExtraValue) Unmarshal(dAtA []byte) error {
|
||||||
l := len(data)
|
l := len(dAtA)
|
||||||
iNdEx := 0
|
iNdEx := 0
|
||||||
for iNdEx < l {
|
for iNdEx < l {
|
||||||
preIndex := iNdEx
|
preIndex := iNdEx
|
||||||
@@ -463,7 +469,7 @@ func (m *ExtraValue) Unmarshal(data []byte) error {
|
|||||||
if iNdEx >= l {
|
if iNdEx >= l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
b := data[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
wire |= (uint64(b) & 0x7F) << shift
|
wire |= (uint64(b) & 0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
@@ -491,7 +497,7 @@ func (m *ExtraValue) Unmarshal(data []byte) error {
|
|||||||
if iNdEx >= l {
|
if iNdEx >= l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
b := data[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
stringLen |= (uint64(b) & 0x7F) << shift
|
stringLen |= (uint64(b) & 0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
@@ -506,11 +512,11 @@ func (m *ExtraValue) Unmarshal(data []byte) error {
|
|||||||
if postIndex > l {
|
if postIndex > l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
*m = append(*m, string(data[iNdEx:postIndex]))
|
*m = append(*m, string(dAtA[iNdEx:postIndex]))
|
||||||
iNdEx = postIndex
|
iNdEx = postIndex
|
||||||
default:
|
default:
|
||||||
iNdEx = preIndex
|
iNdEx = preIndex
|
||||||
skippy, err := skipGenerated(data[iNdEx:])
|
skippy, err := skipGenerated(dAtA[iNdEx:])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -529,8 +535,8 @@ func (m *ExtraValue) Unmarshal(data []byte) error {
|
|||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
func (m *TokenReview) Unmarshal(data []byte) error {
|
func (m *TokenReview) Unmarshal(dAtA []byte) error {
|
||||||
l := len(data)
|
l := len(dAtA)
|
||||||
iNdEx := 0
|
iNdEx := 0
|
||||||
for iNdEx < l {
|
for iNdEx < l {
|
||||||
preIndex := iNdEx
|
preIndex := iNdEx
|
||||||
@@ -542,7 +548,7 @@ func (m *TokenReview) Unmarshal(data []byte) error {
|
|||||||
if iNdEx >= l {
|
if iNdEx >= l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
b := data[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
wire |= (uint64(b) & 0x7F) << shift
|
wire |= (uint64(b) & 0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
@@ -570,7 +576,7 @@ func (m *TokenReview) Unmarshal(data []byte) error {
|
|||||||
if iNdEx >= l {
|
if iNdEx >= l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
b := data[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
msglen |= (int(b) & 0x7F) << shift
|
msglen |= (int(b) & 0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
@@ -584,7 +590,7 @@ func (m *TokenReview) Unmarshal(data []byte) error {
|
|||||||
if postIndex > l {
|
if postIndex > l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil {
|
if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
iNdEx = postIndex
|
iNdEx = postIndex
|
||||||
@@ -600,7 +606,7 @@ func (m *TokenReview) Unmarshal(data []byte) error {
|
|||||||
if iNdEx >= l {
|
if iNdEx >= l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
b := data[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
msglen |= (int(b) & 0x7F) << shift
|
msglen |= (int(b) & 0x7F) << shift
|
||||||
 if b < 0x80 {
@@ -614,7 +620,7 @@ func (m *TokenReview) Unmarshal(data []byte) error {
 if postIndex > l {
 return io.ErrUnexpectedEOF
 }
-if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil {
+if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
 return err
 }
 iNdEx = postIndex

The remaining hunks in this generated file repeat one mechanical change through TokenReview.Unmarshal, TokenReviewSpec.Unmarshal, TokenReviewStatus.Unmarshal, UserInfo.Unmarshal, and skipGenerated: the []byte parameter data is renamed to dAtA, and every slice index, string conversion, and nested Unmarshal call is updated to match. Two hunks go further. In UserInfo.Unmarshal, the map-entry decoding for m.Extra now initializes the map before reading the value and only decodes a value when one is actually present (iNdEx < postIndex), storing a zero ExtraValue otherwise:

@@ -1057,61 +1063,66 @@ func (m *UserInfo) Unmarshal(data []byte) error {
 if postStringIndexmapkey > l {
 return io.ErrUnexpectedEOF
 }
-mapkey := string(data[iNdEx:postStringIndexmapkey])
+mapkey := string(dAtA[iNdEx:postStringIndexmapkey])
 iNdEx = postStringIndexmapkey
-[varint decoding of valuekey and mapmsglen with bounds checks]
-mapvalue := &ExtraValue{}
-if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil {
-return err
-}
-iNdEx = postmsgIndex
 if m.Extra == nil {
 m.Extra = make(map[string]ExtraValue)
 }
-m.Extra[mapkey] = *mapvalue
+if iNdEx < postIndex {
+[same varint decoding of valuekey and mapmsglen, reading from dAtA]
+mapvalue := &ExtraValue{}
+if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
+return err
+}
+iNdEx = postmsgIndex
+m.Extra[mapkey] = *mapvalue
+} else {
+var mapvalue ExtraValue
+m.Extra[mapkey] = mapvalue
+}
 iNdEx = postIndex
 default:
 iNdEx = preIndex
-skippy, err := skipGenerated(data[iNdEx:])
+skippy, err := skipGenerated(dAtA[iNdEx:])
 if err != nil {
 return err
 }

and the file now registers its descriptor at init time:

@@ -1235,6 +1246,10 @@ var (
 ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow")
 )
 
+func init() {
+proto.RegisterFile("k8s.io/client-go/pkg/apis/authentication/v1beta1/generated.proto", fileDescriptorGenerated)
+}
+
 var fileDescriptorGenerated = []byte{
 // 668 bytes of a gzipped FileDescriptorProto
 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xa4, 0x53, 0x4d, 0x6f, 0xd3, 0x4a,
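Every decode loop in the hunks above follows the protobuf base-128 varint scheme: each byte contributes its low seven bits, and a set high bit marks continuation. The following is a minimal standalone sketch of that pattern, not part of the generated code; decodeUvarint and the sample buffer are illustrative names and values.

package main

import (
	"errors"
	"fmt"
)

// decodeUvarint reads a protobuf varint from buf, mirroring the loops in the
// generated Unmarshal methods: seven payload bits per byte, high bit set on
// every byte except the last.
func decodeUvarint(buf []byte) (v uint64, n int, err error) {
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errors.New("integer overflow")
		}
		if n >= len(buf) {
			return 0, 0, errors.New("unexpected EOF")
		}
		b := buf[n]
		n++
		v |= (uint64(b) & 0x7F) << shift
		if b < 0x80 {
			return v, n, nil
		}
	}
}

func main() {
	v, n, err := decodeUvarint([]byte{0xAC, 0x02}) // 300 encoded as a varint
	fmt.Println(v, n, err)                         // 300 2 <nil>
}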
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -53,7 +53,9 @@ var _ = math.Inf
 
 // This is a compile-time assertion to ensure that this generated file
 // is compatible with the proto package it is being compiled against.
-const _ = proto.GoGoProtoPackageIsVersion1
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
 
 func (m *Job) Reset() { *m = Job{} }
 func (*Job) ProtoMessage() {}

Every other hunk in this generated batch/v1 file applies the same []byte parameter rename as the previous file: the Marshal, MarshalTo, and Unmarshal methods of Job, JobCondition, JobList, JobSpec, and JobStatus, the encodeFixed64Generated, encodeFixed32Generated, and encodeVarintGenerated helpers, and skipGenerated all take dAtA instead of data, with every index and slice expression updated to match. A representative hunk:

@@ -552,7 +554,7 @@ func (m *Job) Unmarshal(data []byte) error {
 if iNdEx >= l {
 return io.ErrUnexpectedEOF
 }
-b := data[iNdEx]
+b := dAtA[iNdEx]
 iNdEx++
 wire |= (uint64(b) & 0x7F) << shift
 if b < 0x80 {

The file also gains the same init-time registration of its descriptor:

@@ -1519,6 +1521,10 @@ var (
 ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow")
 )
 
+func init() {
+proto.RegisterFile("k8s.io/client-go/pkg/apis/batch/v1/generated.proto", fileDescriptorGenerated)
+}
+
 var fileDescriptorGenerated = []byte{
 // 885 bytes of a gzipped FileDescriptorProto
 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x9c, 0x54, 0xdd, 0x6e, 0xe3, 0x44,
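encodeVarintGenerated, renamed along with its buffer parameter above, writes the same base-128 format in the opposite direction. Below is a self-contained sketch of that encoding; putUvarint is an illustrative name, and unlike the generated helper (which writes into a pre-sized slice at a given offset) this version simply appends.

package main

import "fmt"

// putUvarint appends v to buf in protobuf varint form, the same scheme used by
// encodeVarintGenerated: emit seven bits at a time, setting the high bit while
// more bytes follow.
func putUvarint(buf []byte, v uint64) []byte {
	for v >= 1<<7 {
		buf = append(buf, uint8(v&0x7F|0x80))
		v >>= 7
	}
	return append(buf, uint8(v))
}

func main() {
	fmt.Printf("% x\n", putUvarint(nil, 300)) // ac 02
}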
@@ -139,8 +139,6 @@ message JobStatus {
 // Conditions represent the latest available observations of an object's current state.
 // More info: http://kubernetes.io/docs/user-guide/jobs
 // +optional
-// +patchMergeKey=type
-// +patchStrategy=merge
 repeated JobCondition conditions = 1;
 
 // StartTime represents time when the job was acknowledged by the Job Manager.
@@ -110,8 +110,6 @@ type JobStatus struct {
|
|||||||
// Conditions represent the latest available observations of an object's current state.
|
// Conditions represent the latest available observations of an object's current state.
|
||||||
// More info: http://kubernetes.io/docs/user-guide/jobs
|
// More info: http://kubernetes.io/docs/user-guide/jobs
|
||||||
// +optional
|
// +optional
|
||||||
// +patchMergeKey=type
|
|
||||||
// +patchStrategy=merge
|
|
||||||
Conditions []JobCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"`
|
Conditions []JobCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"`
|
||||||
|
|
||||||
// StartTime represents time when the job was acknowledged by the Job Manager.
|
// StartTime represents time when the job was acknowledged by the Job Manager.
|
||||||
|
|||||||
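The two JobStatus hunks above remove the +patchMergeKey=type and +patchStrategy=merge marker comments from the conditions field while leaving the patchStrategy/patchMergeKey struct tags in place. A stand-in sketch, not the real batch types, showing where the removed markers sat relative to the tags that remain:

package example

// Stand-in condition type; the real one lives in the batch API group.
type JobCondition struct {
	Type string `json:"type"`
}

// JobStatusSketch shows the marker comments dropped by this change alongside
// the patchStrategy/patchMergeKey struct tags, which are unchanged.
type JobStatusSketch struct {
	// +patchMergeKey=type
	// +patchStrategy=merge
	Conditions []JobCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"`
}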
@@ -54,7 +54,9 @@ var _ = math.Inf
|
|||||||
|
|
||||||
// This is a compile-time assertion to ensure that this generated file
|
// This is a compile-time assertion to ensure that this generated file
|
||||||
// is compatible with the proto package it is being compiled against.
|
// is compatible with the proto package it is being compiled against.
|
||||||
const _ = proto.GoGoProtoPackageIsVersion1
|
// A compilation error at this line likely means your copy of the
|
||||||
|
// proto package needs to be updated.
|
||||||
|
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
|
||||||
|
|
||||||
func (m *CronJob) Reset() { *m = CronJob{} }
|
func (m *CronJob) Reset() { *m = CronJob{} }
|
||||||
func (*CronJob) ProtoMessage() {}
|
func (*CronJob) ProtoMessage() {}
|
||||||
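This hunk swaps the GoGoProtoPackageIsVersion1 guard for GoGoProtoPackageIsVersion2. The idiom is a compile-time check: the blank constant only exists in a sufficiently new proto runtime, so a stale vendored copy breaks the build at this line instead of failing later. Sketch only, assuming the gogo/protobuf import path the generated files already use:

package example

import proto "github.com/gogo/protobuf/proto"

// A compilation error on the next line means the vendored proto package is
// older than the generator that produced this file.
const _ = proto.GoGoProtoPackageIsVersion2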
@@ -88,41 +90,41 @@ func init() {
|
|||||||
proto.RegisterType((*JobTemplate)(nil), "k8s.io.client-go.pkg.apis.batch.v2alpha1.JobTemplate")
|
proto.RegisterType((*JobTemplate)(nil), "k8s.io.client-go.pkg.apis.batch.v2alpha1.JobTemplate")
|
||||||
proto.RegisterType((*JobTemplateSpec)(nil), "k8s.io.client-go.pkg.apis.batch.v2alpha1.JobTemplateSpec")
|
proto.RegisterType((*JobTemplateSpec)(nil), "k8s.io.client-go.pkg.apis.batch.v2alpha1.JobTemplateSpec")
|
||||||
}
|
}
|
||||||
func (m *CronJob) Marshal() (data []byte, err error) {
|
func (m *CronJob) Marshal() (dAtA []byte, err error) {
|
||||||
size := m.Size()
|
size := m.Size()
|
||||||
data = make([]byte, size)
|
dAtA = make([]byte, size)
|
||||||
n, err := m.MarshalTo(data)
|
n, err := m.MarshalTo(dAtA)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
return data[:n], nil
|
return dAtA[:n], nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *CronJob) MarshalTo(data []byte) (int, error) {
|
func (m *CronJob) MarshalTo(dAtA []byte) (int, error) {
|
||||||
var i int
|
var i int
|
||||||
_ = i
|
_ = i
|
||||||
var l int
|
var l int
|
||||||
_ = l
|
_ = l
|
||||||
data[i] = 0xa
|
dAtA[i] = 0xa
|
||||||
i++
|
i++
|
||||||
i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size()))
|
i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size()))
|
||||||
n1, err := m.ObjectMeta.MarshalTo(data[i:])
|
n1, err := m.ObjectMeta.MarshalTo(dAtA[i:])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
i += n1
|
i += n1
|
||||||
data[i] = 0x12
|
dAtA[i] = 0x12
|
||||||
i++
|
i++
|
||||||
i = encodeVarintGenerated(data, i, uint64(m.Spec.Size()))
|
i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size()))
|
||||||
n2, err := m.Spec.MarshalTo(data[i:])
|
n2, err := m.Spec.MarshalTo(dAtA[i:])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
i += n2
|
i += n2
|
||||||
data[i] = 0x1a
|
dAtA[i] = 0x1a
|
||||||
i++
|
i++
|
||||||
i = encodeVarintGenerated(data, i, uint64(m.Status.Size()))
|
i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size()))
|
||||||
n3, err := m.Status.MarshalTo(data[i:])
|
n3, err := m.Status.MarshalTo(dAtA[i:])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
@@ -130,35 +132,35 @@ func (m *CronJob) MarshalTo(data []byte) (int, error) {
|
|||||||
return i, nil
|
return i, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *CronJobList) Marshal() (data []byte, err error) {
|
func (m *CronJobList) Marshal() (dAtA []byte, err error) {
|
||||||
size := m.Size()
|
size := m.Size()
|
||||||
data = make([]byte, size)
|
dAtA = make([]byte, size)
|
||||||
n, err := m.MarshalTo(data)
|
n, err := m.MarshalTo(dAtA)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
return data[:n], nil
|
return dAtA[:n], nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *CronJobList) MarshalTo(data []byte) (int, error) {
|
func (m *CronJobList) MarshalTo(dAtA []byte) (int, error) {
|
||||||
var i int
|
var i int
|
||||||
_ = i
|
_ = i
|
||||||
var l int
|
var l int
|
||||||
_ = l
|
_ = l
|
||||||
data[i] = 0xa
|
dAtA[i] = 0xa
|
||||||
i++
|
i++
|
||||||
i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size()))
|
i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size()))
|
||||||
n4, err := m.ListMeta.MarshalTo(data[i:])
|
n4, err := m.ListMeta.MarshalTo(dAtA[i:])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
i += n4
|
i += n4
|
||||||
if len(m.Items) > 0 {
|
if len(m.Items) > 0 {
|
||||||
for _, msg := range m.Items {
|
for _, msg := range m.Items {
|
||||||
data[i] = 0x12
|
dAtA[i] = 0x12
|
||||||
i++
|
i++
|
||||||
i = encodeVarintGenerated(data, i, uint64(msg.Size()))
|
i = encodeVarintGenerated(dAtA, i, uint64(msg.Size()))
|
||||||
n, err := msg.MarshalTo(data[i:])
|
n, err := msg.MarshalTo(dAtA[i:])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
@@ -168,86 +170,86 @@ func (m *CronJobList) MarshalTo(data []byte) (int, error) {
|
|||||||
return i, nil
|
return i, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *CronJobSpec) Marshal() (data []byte, err error) {
|
func (m *CronJobSpec) Marshal() (dAtA []byte, err error) {
|
||||||
size := m.Size()
|
size := m.Size()
|
||||||
data = make([]byte, size)
|
dAtA = make([]byte, size)
|
||||||
n, err := m.MarshalTo(data)
|
n, err := m.MarshalTo(dAtA)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
return data[:n], nil
|
return dAtA[:n], nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *CronJobSpec) MarshalTo(data []byte) (int, error) {
|
func (m *CronJobSpec) MarshalTo(dAtA []byte) (int, error) {
|
||||||
var i int
|
var i int
|
||||||
_ = i
|
_ = i
|
||||||
var l int
|
var l int
|
||||||
_ = l
|
_ = l
|
||||||
data[i] = 0xa
|
dAtA[i] = 0xa
|
||||||
i++
|
i++
|
||||||
i = encodeVarintGenerated(data, i, uint64(len(m.Schedule)))
|
i = encodeVarintGenerated(dAtA, i, uint64(len(m.Schedule)))
|
||||||
i += copy(data[i:], m.Schedule)
|
i += copy(dAtA[i:], m.Schedule)
|
||||||
if m.StartingDeadlineSeconds != nil {
|
if m.StartingDeadlineSeconds != nil {
|
||||||
data[i] = 0x10
|
dAtA[i] = 0x10
|
||||||
i++
|
i++
|
||||||
i = encodeVarintGenerated(data, i, uint64(*m.StartingDeadlineSeconds))
|
i = encodeVarintGenerated(dAtA, i, uint64(*m.StartingDeadlineSeconds))
|
||||||
}
|
}
|
||||||
data[i] = 0x1a
|
dAtA[i] = 0x1a
|
||||||
i++
|
i++
|
||||||
i = encodeVarintGenerated(data, i, uint64(len(m.ConcurrencyPolicy)))
|
i = encodeVarintGenerated(dAtA, i, uint64(len(m.ConcurrencyPolicy)))
|
||||||
i += copy(data[i:], m.ConcurrencyPolicy)
|
i += copy(dAtA[i:], m.ConcurrencyPolicy)
|
||||||
if m.Suspend != nil {
|
if m.Suspend != nil {
|
||||||
data[i] = 0x20
|
dAtA[i] = 0x20
|
||||||
i++
|
i++
|
||||||
if *m.Suspend {
|
if *m.Suspend {
|
||||||
data[i] = 1
|
dAtA[i] = 1
|
||||||
} else {
|
} else {
|
||||||
data[i] = 0
|
dAtA[i] = 0
|
||||||
}
|
}
|
||||||
i++
|
i++
|
||||||
}
|
}
|
||||||
data[i] = 0x2a
|
dAtA[i] = 0x2a
|
||||||
i++
|
i++
|
||||||
i = encodeVarintGenerated(data, i, uint64(m.JobTemplate.Size()))
|
i = encodeVarintGenerated(dAtA, i, uint64(m.JobTemplate.Size()))
|
||||||
n5, err := m.JobTemplate.MarshalTo(data[i:])
|
n5, err := m.JobTemplate.MarshalTo(dAtA[i:])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
i += n5
|
i += n5
|
||||||
if m.SuccessfulJobsHistoryLimit != nil {
|
if m.SuccessfulJobsHistoryLimit != nil {
|
||||||
data[i] = 0x30
|
dAtA[i] = 0x30
|
||||||
i++
|
i++
|
||||||
i = encodeVarintGenerated(data, i, uint64(*m.SuccessfulJobsHistoryLimit))
|
i = encodeVarintGenerated(dAtA, i, uint64(*m.SuccessfulJobsHistoryLimit))
|
||||||
}
|
}
|
||||||
if m.FailedJobsHistoryLimit != nil {
|
if m.FailedJobsHistoryLimit != nil {
|
||||||
data[i] = 0x38
|
dAtA[i] = 0x38
|
||||||
i++
|
i++
|
||||||
i = encodeVarintGenerated(data, i, uint64(*m.FailedJobsHistoryLimit))
|
i = encodeVarintGenerated(dAtA, i, uint64(*m.FailedJobsHistoryLimit))
|
||||||
}
|
}
|
||||||
return i, nil
|
return i, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *CronJobStatus) Marshal() (data []byte, err error) {
|
func (m *CronJobStatus) Marshal() (dAtA []byte, err error) {
|
||||||
size := m.Size()
|
size := m.Size()
|
||||||
data = make([]byte, size)
|
dAtA = make([]byte, size)
|
||||||
n, err := m.MarshalTo(data)
|
n, err := m.MarshalTo(dAtA)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
return data[:n], nil
|
return dAtA[:n], nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *CronJobStatus) MarshalTo(data []byte) (int, error) {
|
func (m *CronJobStatus) MarshalTo(dAtA []byte) (int, error) {
|
||||||
var i int
|
var i int
|
||||||
_ = i
|
_ = i
|
||||||
var l int
|
var l int
|
||||||
_ = l
|
_ = l
|
||||||
if len(m.Active) > 0 {
|
if len(m.Active) > 0 {
|
||||||
for _, msg := range m.Active {
|
for _, msg := range m.Active {
|
||||||
data[i] = 0xa
|
dAtA[i] = 0xa
|
||||||
i++
|
i++
|
||||||
i = encodeVarintGenerated(data, i, uint64(msg.Size()))
|
i = encodeVarintGenerated(dAtA, i, uint64(msg.Size()))
|
||||||
n, err := msg.MarshalTo(data[i:])
|
n, err := msg.MarshalTo(dAtA[i:])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
@@ -255,10 +257,10 @@ func (m *CronJobStatus) MarshalTo(data []byte) (int, error) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
if m.LastScheduleTime != nil {
|
if m.LastScheduleTime != nil {
|
||||||
data[i] = 0x22
|
dAtA[i] = 0x22
|
||||||
i++
|
i++
|
||||||
i = encodeVarintGenerated(data, i, uint64(m.LastScheduleTime.Size()))
|
i = encodeVarintGenerated(dAtA, i, uint64(m.LastScheduleTime.Size()))
|
||||||
n6, err := m.LastScheduleTime.MarshalTo(data[i:])
|
n6, err := m.LastScheduleTime.MarshalTo(dAtA[i:])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
@@ -267,33 +269,33 @@ func (m *CronJobStatus) MarshalTo(data []byte) (int, error) {
|
|||||||
return i, nil
|
return i, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *JobTemplate) Marshal() (data []byte, err error) {
|
func (m *JobTemplate) Marshal() (dAtA []byte, err error) {
|
||||||
size := m.Size()
|
size := m.Size()
|
||||||
data = make([]byte, size)
|
dAtA = make([]byte, size)
|
||||||
n, err := m.MarshalTo(data)
|
n, err := m.MarshalTo(dAtA)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
return data[:n], nil
|
return dAtA[:n], nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *JobTemplate) MarshalTo(data []byte) (int, error) {
|
func (m *JobTemplate) MarshalTo(dAtA []byte) (int, error) {
|
||||||
var i int
|
var i int
|
||||||
_ = i
|
_ = i
|
||||||
var l int
|
var l int
|
||||||
_ = l
|
_ = l
|
||||||
data[i] = 0xa
|
dAtA[i] = 0xa
|
||||||
i++
|
i++
|
||||||
i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size()))
|
i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size()))
|
||||||
n7, err := m.ObjectMeta.MarshalTo(data[i:])
|
n7, err := m.ObjectMeta.MarshalTo(dAtA[i:])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
i += n7
|
i += n7
|
||||||
data[i] = 0x12
|
dAtA[i] = 0x12
|
||||||
i++
|
i++
|
||||||
i = encodeVarintGenerated(data, i, uint64(m.Template.Size()))
|
i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size()))
|
||||||
n8, err := m.Template.MarshalTo(data[i:])
|
n8, err := m.Template.MarshalTo(dAtA[i:])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
@@ -301,33 +303,33 @@ func (m *JobTemplate) MarshalTo(data []byte) (int, error) {
|
|||||||
return i, nil
|
return i, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *JobTemplateSpec) Marshal() (data []byte, err error) {
|
func (m *JobTemplateSpec) Marshal() (dAtA []byte, err error) {
|
||||||
size := m.Size()
|
size := m.Size()
|
||||||
data = make([]byte, size)
|
dAtA = make([]byte, size)
|
||||||
n, err := m.MarshalTo(data)
|
n, err := m.MarshalTo(dAtA)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
return data[:n], nil
|
return dAtA[:n], nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *JobTemplateSpec) MarshalTo(data []byte) (int, error) {
|
func (m *JobTemplateSpec) MarshalTo(dAtA []byte) (int, error) {
|
||||||
var i int
|
var i int
|
||||||
_ = i
|
_ = i
|
||||||
var l int
|
var l int
|
||||||
_ = l
|
_ = l
|
||||||
data[i] = 0xa
|
dAtA[i] = 0xa
|
||||||
i++
|
i++
|
||||||
i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size()))
|
i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size()))
|
||||||
n9, err := m.ObjectMeta.MarshalTo(data[i:])
|
n9, err := m.ObjectMeta.MarshalTo(dAtA[i:])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
i += n9
|
i += n9
|
||||||
data[i] = 0x12
|
dAtA[i] = 0x12
|
||||||
i++
|
i++
|
||||||
i = encodeVarintGenerated(data, i, uint64(m.Spec.Size()))
|
i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size()))
|
||||||
n10, err := m.Spec.MarshalTo(data[i:])
|
n10, err := m.Spec.MarshalTo(dAtA[i:])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
@@ -335,31 +337,31 @@ func (m *JobTemplateSpec) MarshalTo(data []byte) (int, error) {
|
|||||||
return i, nil
|
return i, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func encodeFixed64Generated(data []byte, offset int, v uint64) int {
|
func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int {
|
||||||
data[offset] = uint8(v)
|
dAtA[offset] = uint8(v)
|
||||||
data[offset+1] = uint8(v >> 8)
|
dAtA[offset+1] = uint8(v >> 8)
|
||||||
data[offset+2] = uint8(v >> 16)
|
dAtA[offset+2] = uint8(v >> 16)
|
||||||
data[offset+3] = uint8(v >> 24)
|
dAtA[offset+3] = uint8(v >> 24)
|
||||||
data[offset+4] = uint8(v >> 32)
|
dAtA[offset+4] = uint8(v >> 32)
|
||||||
data[offset+5] = uint8(v >> 40)
|
dAtA[offset+5] = uint8(v >> 40)
|
||||||
data[offset+6] = uint8(v >> 48)
|
dAtA[offset+6] = uint8(v >> 48)
|
||||||
data[offset+7] = uint8(v >> 56)
|
dAtA[offset+7] = uint8(v >> 56)
|
||||||
return offset + 8
|
return offset + 8
|
||||||
}
|
}
|
||||||
func encodeFixed32Generated(data []byte, offset int, v uint32) int {
|
func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int {
|
||||||
data[offset] = uint8(v)
|
dAtA[offset] = uint8(v)
|
||||||
data[offset+1] = uint8(v >> 8)
|
dAtA[offset+1] = uint8(v >> 8)
|
||||||
data[offset+2] = uint8(v >> 16)
|
dAtA[offset+2] = uint8(v >> 16)
|
||||||
data[offset+3] = uint8(v >> 24)
|
dAtA[offset+3] = uint8(v >> 24)
|
||||||
return offset + 4
|
return offset + 4
|
||||||
}
|
}
|
||||||
func encodeVarintGenerated(data []byte, offset int, v uint64) int {
|
func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
|
||||||
for v >= 1<<7 {
|
for v >= 1<<7 {
|
||||||
data[offset] = uint8(v&0x7f | 0x80)
|
dAtA[offset] = uint8(v&0x7f | 0x80)
|
||||||
v >>= 7
|
v >>= 7
|
||||||
offset++
|
offset++
|
||||||
}
|
}
|
||||||
data[offset] = uint8(v)
|
dAtA[offset] = uint8(v)
|
||||||
return offset + 1
|
return offset + 1
|
||||||
}
|
}
|
||||||
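The Marshal helpers above delegate every length prefix and integer field to encodeVarintGenerated. Below is a standalone copy of that loop with a small usage example; buf and the value 300 are illustrative and not part of this change.

package main

import "fmt"

// encodeVarint uses the same base-128 scheme as encodeVarintGenerated:
// seven bits per byte, high bit set while more bytes follow.
func encodeVarint(dAtA []byte, offset int, v uint64) int {
	for v >= 1<<7 {
		dAtA[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	dAtA[offset] = uint8(v)
	return offset + 1
}

func main() {
	buf := make([]byte, 10)
	n := encodeVarint(buf, 0, 300)
	fmt.Printf("% x\n", buf[:n]) // ac 02
}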
func (m *CronJob) Size() (n int) {
|
func (m *CronJob) Size() (n int) {
|
||||||
@@ -541,8 +543,8 @@ func valueToStringGenerated(v interface{}) string {
|
|||||||
pv := reflect.Indirect(rv).Interface()
|
pv := reflect.Indirect(rv).Interface()
|
||||||
return fmt.Sprintf("*%v", pv)
|
return fmt.Sprintf("*%v", pv)
|
||||||
}
|
}
|
||||||
func (m *CronJob) Unmarshal(data []byte) error {
|
func (m *CronJob) Unmarshal(dAtA []byte) error {
|
||||||
l := len(data)
|
l := len(dAtA)
|
||||||
iNdEx := 0
|
iNdEx := 0
|
||||||
for iNdEx < l {
|
for iNdEx < l {
|
||||||
preIndex := iNdEx
|
preIndex := iNdEx
|
||||||
@@ -554,7 +556,7 @@ func (m *CronJob) Unmarshal(data []byte) error {
|
|||||||
if iNdEx >= l {
|
if iNdEx >= l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
b := data[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
wire |= (uint64(b) & 0x7F) << shift
|
wire |= (uint64(b) & 0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
@@ -582,7 +584,7 @@ func (m *CronJob) Unmarshal(data []byte) error {
|
|||||||
if iNdEx >= l {
|
if iNdEx >= l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
b := data[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
msglen |= (int(b) & 0x7F) << shift
|
msglen |= (int(b) & 0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
@@ -596,7 +598,7 @@ func (m *CronJob) Unmarshal(data []byte) error {
|
|||||||
if postIndex > l {
|
if postIndex > l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil {
|
if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
iNdEx = postIndex
|
iNdEx = postIndex
|
||||||
@@ -612,7 +614,7 @@ func (m *CronJob) Unmarshal(data []byte) error {
|
|||||||
if iNdEx >= l {
|
if iNdEx >= l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
b := data[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
msglen |= (int(b) & 0x7F) << shift
|
msglen |= (int(b) & 0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
@@ -626,7 +628,7 @@ func (m *CronJob) Unmarshal(data []byte) error {
|
|||||||
if postIndex > l {
|
if postIndex > l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil {
|
if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
iNdEx = postIndex
|
iNdEx = postIndex
|
||||||
@@ -642,7 +644,7 @@ func (m *CronJob) Unmarshal(data []byte) error {
|
|||||||
if iNdEx >= l {
|
if iNdEx >= l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
b := data[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
msglen |= (int(b) & 0x7F) << shift
|
msglen |= (int(b) & 0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
@@ -656,13 +658,13 @@ func (m *CronJob) Unmarshal(data []byte) error {
|
|||||||
if postIndex > l {
|
if postIndex > l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil {
|
if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
iNdEx = postIndex
|
iNdEx = postIndex
|
||||||
default:
|
default:
|
||||||
iNdEx = preIndex
|
iNdEx = preIndex
|
||||||
skippy, err := skipGenerated(data[iNdEx:])
|
skippy, err := skipGenerated(dAtA[iNdEx:])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -681,8 +683,8 @@ func (m *CronJob) Unmarshal(data []byte) error {
|
|||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
func (m *CronJobList) Unmarshal(data []byte) error {
|
func (m *CronJobList) Unmarshal(dAtA []byte) error {
|
||||||
l := len(data)
|
l := len(dAtA)
|
||||||
iNdEx := 0
|
iNdEx := 0
|
||||||
for iNdEx < l {
|
for iNdEx < l {
|
||||||
preIndex := iNdEx
|
preIndex := iNdEx
|
||||||
@@ -694,7 +696,7 @@ func (m *CronJobList) Unmarshal(data []byte) error {
|
|||||||
if iNdEx >= l {
|
if iNdEx >= l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
b := data[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
wire |= (uint64(b) & 0x7F) << shift
|
wire |= (uint64(b) & 0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
@@ -722,7 +724,7 @@ func (m *CronJobList) Unmarshal(data []byte) error {
|
|||||||
if iNdEx >= l {
|
if iNdEx >= l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
b := data[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
msglen |= (int(b) & 0x7F) << shift
|
msglen |= (int(b) & 0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
@@ -736,7 +738,7 @@ func (m *CronJobList) Unmarshal(data []byte) error {
|
|||||||
if postIndex > l {
|
if postIndex > l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil {
|
if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
iNdEx = postIndex
|
iNdEx = postIndex
|
||||||
@@ -752,7 +754,7 @@ func (m *CronJobList) Unmarshal(data []byte) error {
|
|||||||
if iNdEx >= l {
|
if iNdEx >= l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
b := data[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
msglen |= (int(b) & 0x7F) << shift
|
msglen |= (int(b) & 0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
@@ -767,13 +769,13 @@ func (m *CronJobList) Unmarshal(data []byte) error {
|
|||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
m.Items = append(m.Items, CronJob{})
|
m.Items = append(m.Items, CronJob{})
|
||||||
if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
|
if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
iNdEx = postIndex
|
iNdEx = postIndex
|
||||||
default:
|
default:
|
||||||
iNdEx = preIndex
|
iNdEx = preIndex
|
||||||
skippy, err := skipGenerated(data[iNdEx:])
|
skippy, err := skipGenerated(dAtA[iNdEx:])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -792,8 +794,8 @@ func (m *CronJobList) Unmarshal(data []byte) error {
|
|||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
func (m *CronJobSpec) Unmarshal(data []byte) error {
|
func (m *CronJobSpec) Unmarshal(dAtA []byte) error {
|
||||||
l := len(data)
|
l := len(dAtA)
|
||||||
iNdEx := 0
|
iNdEx := 0
|
||||||
for iNdEx < l {
|
for iNdEx < l {
|
||||||
preIndex := iNdEx
|
preIndex := iNdEx
|
||||||
@@ -805,7 +807,7 @@ func (m *CronJobSpec) Unmarshal(data []byte) error {
|
|||||||
if iNdEx >= l {
|
if iNdEx >= l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
b := data[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
wire |= (uint64(b) & 0x7F) << shift
|
wire |= (uint64(b) & 0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
@@ -833,7 +835,7 @@ func (m *CronJobSpec) Unmarshal(data []byte) error {
|
|||||||
if iNdEx >= l {
|
if iNdEx >= l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
b := data[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
stringLen |= (uint64(b) & 0x7F) << shift
|
stringLen |= (uint64(b) & 0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
@@ -848,7 +850,7 @@ func (m *CronJobSpec) Unmarshal(data []byte) error {
|
|||||||
if postIndex > l {
|
if postIndex > l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
m.Schedule = string(data[iNdEx:postIndex])
|
m.Schedule = string(dAtA[iNdEx:postIndex])
|
||||||
iNdEx = postIndex
|
iNdEx = postIndex
|
||||||
case 2:
|
case 2:
|
||||||
if wireType != 0 {
|
if wireType != 0 {
|
||||||
@@ -862,7 +864,7 @@ func (m *CronJobSpec) Unmarshal(data []byte) error {
|
|||||||
if iNdEx >= l {
|
if iNdEx >= l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
b := data[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
v |= (int64(b) & 0x7F) << shift
|
v |= (int64(b) & 0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
@@ -882,7 +884,7 @@ func (m *CronJobSpec) Unmarshal(data []byte) error {
|
|||||||
if iNdEx >= l {
|
if iNdEx >= l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
b := data[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
stringLen |= (uint64(b) & 0x7F) << shift
|
stringLen |= (uint64(b) & 0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
@@ -897,7 +899,7 @@ func (m *CronJobSpec) Unmarshal(data []byte) error {
|
|||||||
if postIndex > l {
|
if postIndex > l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
m.ConcurrencyPolicy = ConcurrencyPolicy(data[iNdEx:postIndex])
|
m.ConcurrencyPolicy = ConcurrencyPolicy(dAtA[iNdEx:postIndex])
|
||||||
iNdEx = postIndex
|
iNdEx = postIndex
|
||||||
case 4:
|
case 4:
|
||||||
if wireType != 0 {
|
if wireType != 0 {
|
||||||
@@ -911,7 +913,7 @@ func (m *CronJobSpec) Unmarshal(data []byte) error {
|
|||||||
if iNdEx >= l {
|
if iNdEx >= l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
b := data[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
v |= (int(b) & 0x7F) << shift
|
v |= (int(b) & 0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
@@ -932,7 +934,7 @@ func (m *CronJobSpec) Unmarshal(data []byte) error {
|
|||||||
if iNdEx >= l {
|
if iNdEx >= l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
b := data[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
msglen |= (int(b) & 0x7F) << shift
|
msglen |= (int(b) & 0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
@@ -946,7 +948,7 @@ func (m *CronJobSpec) Unmarshal(data []byte) error {
|
|||||||
if postIndex > l {
|
if postIndex > l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
if err := m.JobTemplate.Unmarshal(data[iNdEx:postIndex]); err != nil {
|
if err := m.JobTemplate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
iNdEx = postIndex
|
iNdEx = postIndex
|
||||||
@@ -962,7 +964,7 @@ func (m *CronJobSpec) Unmarshal(data []byte) error {
|
|||||||
if iNdEx >= l {
|
if iNdEx >= l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
b := data[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
v |= (int32(b) & 0x7F) << shift
|
v |= (int32(b) & 0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
@@ -982,7 +984,7 @@ func (m *CronJobSpec) Unmarshal(data []byte) error {
|
|||||||
if iNdEx >= l {
|
if iNdEx >= l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
b := data[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
v |= (int32(b) & 0x7F) << shift
|
v |= (int32(b) & 0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
@@ -992,7 +994,7 @@ func (m *CronJobSpec) Unmarshal(data []byte) error {
|
|||||||
m.FailedJobsHistoryLimit = &v
|
m.FailedJobsHistoryLimit = &v
|
||||||
default:
|
default:
|
||||||
iNdEx = preIndex
|
iNdEx = preIndex
|
||||||
skippy, err := skipGenerated(data[iNdEx:])
|
skippy, err := skipGenerated(dAtA[iNdEx:])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -1011,8 +1013,8 @@ func (m *CronJobSpec) Unmarshal(data []byte) error {
|
|||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
func (m *CronJobStatus) Unmarshal(data []byte) error {
|
func (m *CronJobStatus) Unmarshal(dAtA []byte) error {
|
||||||
l := len(data)
|
l := len(dAtA)
|
||||||
iNdEx := 0
|
iNdEx := 0
|
||||||
for iNdEx < l {
|
for iNdEx < l {
|
||||||
preIndex := iNdEx
|
preIndex := iNdEx
|
||||||
@@ -1024,7 +1026,7 @@ func (m *CronJobStatus) Unmarshal(data []byte) error {
|
|||||||
if iNdEx >= l {
|
if iNdEx >= l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
b := data[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
wire |= (uint64(b) & 0x7F) << shift
|
wire |= (uint64(b) & 0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
@@ -1052,7 +1054,7 @@ func (m *CronJobStatus) Unmarshal(data []byte) error {
|
|||||||
if iNdEx >= l {
|
if iNdEx >= l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
b := data[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
msglen |= (int(b) & 0x7F) << shift
|
msglen |= (int(b) & 0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
@@ -1067,7 +1069,7 @@ func (m *CronJobStatus) Unmarshal(data []byte) error {
|
|||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
m.Active = append(m.Active, k8s_io_kubernetes_pkg_api_v1.ObjectReference{})
|
m.Active = append(m.Active, k8s_io_kubernetes_pkg_api_v1.ObjectReference{})
|
||||||
if err := m.Active[len(m.Active)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
|
if err := m.Active[len(m.Active)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
iNdEx = postIndex
|
iNdEx = postIndex
|
||||||
@@ -1083,7 +1085,7 @@ func (m *CronJobStatus) Unmarshal(data []byte) error {
|
|||||||
if iNdEx >= l {
|
if iNdEx >= l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
b := data[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
msglen |= (int(b) & 0x7F) << shift
|
msglen |= (int(b) & 0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
@@ -1100,13 +1102,13 @@ func (m *CronJobStatus) Unmarshal(data []byte) error {
|
|||||||
if m.LastScheduleTime == nil {
|
if m.LastScheduleTime == nil {
|
||||||
m.LastScheduleTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{}
|
m.LastScheduleTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{}
|
||||||
}
|
}
|
||||||
if err := m.LastScheduleTime.Unmarshal(data[iNdEx:postIndex]); err != nil {
|
if err := m.LastScheduleTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
iNdEx = postIndex
|
iNdEx = postIndex
|
||||||
default:
|
default:
|
||||||
iNdEx = preIndex
|
iNdEx = preIndex
|
||||||
skippy, err := skipGenerated(data[iNdEx:])
|
skippy, err := skipGenerated(dAtA[iNdEx:])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -1125,8 +1127,8 @@ func (m *CronJobStatus) Unmarshal(data []byte) error {
|
|||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
func (m *JobTemplate) Unmarshal(data []byte) error {
|
func (m *JobTemplate) Unmarshal(dAtA []byte) error {
|
||||||
l := len(data)
|
l := len(dAtA)
|
||||||
iNdEx := 0
|
iNdEx := 0
|
||||||
for iNdEx < l {
|
for iNdEx < l {
|
||||||
preIndex := iNdEx
|
preIndex := iNdEx
|
||||||
@@ -1138,7 +1140,7 @@ func (m *JobTemplate) Unmarshal(data []byte) error {
|
|||||||
if iNdEx >= l {
|
if iNdEx >= l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
b := data[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
wire |= (uint64(b) & 0x7F) << shift
|
wire |= (uint64(b) & 0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
@@ -1166,7 +1168,7 @@ func (m *JobTemplate) Unmarshal(data []byte) error {
|
|||||||
if iNdEx >= l {
|
if iNdEx >= l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
b := data[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
msglen |= (int(b) & 0x7F) << shift
|
msglen |= (int(b) & 0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
@@ -1180,7 +1182,7 @@ func (m *JobTemplate) Unmarshal(data []byte) error {
|
|||||||
if postIndex > l {
|
if postIndex > l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil {
|
if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
iNdEx = postIndex
|
iNdEx = postIndex
|
||||||
@@ -1196,7 +1198,7 @@ func (m *JobTemplate) Unmarshal(data []byte) error {
|
|||||||
if iNdEx >= l {
|
if iNdEx >= l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
b := data[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
msglen |= (int(b) & 0x7F) << shift
|
msglen |= (int(b) & 0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
@@ -1210,13 +1212,13 @@ func (m *JobTemplate) Unmarshal(data []byte) error {
|
|||||||
if postIndex > l {
|
if postIndex > l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
if err := m.Template.Unmarshal(data[iNdEx:postIndex]); err != nil {
|
if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
iNdEx = postIndex
|
iNdEx = postIndex
|
||||||
default:
|
default:
|
||||||
iNdEx = preIndex
|
iNdEx = preIndex
|
||||||
skippy, err := skipGenerated(data[iNdEx:])
|
skippy, err := skipGenerated(dAtA[iNdEx:])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -1235,8 +1237,8 @@ func (m *JobTemplate) Unmarshal(data []byte) error {
|
|||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
func (m *JobTemplateSpec) Unmarshal(data []byte) error {
|
func (m *JobTemplateSpec) Unmarshal(dAtA []byte) error {
|
||||||
l := len(data)
|
l := len(dAtA)
|
||||||
iNdEx := 0
|
iNdEx := 0
|
||||||
for iNdEx < l {
|
for iNdEx < l {
|
||||||
preIndex := iNdEx
|
preIndex := iNdEx
|
||||||
@@ -1248,7 +1250,7 @@ func (m *JobTemplateSpec) Unmarshal(data []byte) error {
|
|||||||
if iNdEx >= l {
|
if iNdEx >= l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
b := data[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
wire |= (uint64(b) & 0x7F) << shift
|
wire |= (uint64(b) & 0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
@@ -1276,7 +1278,7 @@ func (m *JobTemplateSpec) Unmarshal(data []byte) error {
|
|||||||
if iNdEx >= l {
|
if iNdEx >= l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
b := data[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
msglen |= (int(b) & 0x7F) << shift
|
msglen |= (int(b) & 0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
@@ -1290,7 +1292,7 @@ func (m *JobTemplateSpec) Unmarshal(data []byte) error {
|
|||||||
if postIndex > l {
|
if postIndex > l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil {
|
if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
iNdEx = postIndex
|
iNdEx = postIndex
|
||||||
@@ -1306,7 +1308,7 @@ func (m *JobTemplateSpec) Unmarshal(data []byte) error {
|
|||||||
if iNdEx >= l {
|
if iNdEx >= l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
b := data[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
msglen |= (int(b) & 0x7F) << shift
|
msglen |= (int(b) & 0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
@@ -1320,13 +1322,13 @@ func (m *JobTemplateSpec) Unmarshal(data []byte) error {
|
|||||||
if postIndex > l {
|
if postIndex > l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil {
|
if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
iNdEx = postIndex
|
iNdEx = postIndex
|
||||||
default:
|
default:
|
||||||
iNdEx = preIndex
|
iNdEx = preIndex
|
||||||
skippy, err := skipGenerated(data[iNdEx:])
|
skippy, err := skipGenerated(dAtA[iNdEx:])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -1345,8 +1347,8 @@ func (m *JobTemplateSpec) Unmarshal(data []byte) error {
|
|||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
func skipGenerated(data []byte) (n int, err error) {
|
func skipGenerated(dAtA []byte) (n int, err error) {
|
||||||
l := len(data)
|
l := len(dAtA)
|
||||||
iNdEx := 0
|
iNdEx := 0
|
||||||
for iNdEx < l {
|
for iNdEx < l {
|
||||||
var wire uint64
|
var wire uint64
|
||||||
@@ -1357,7 +1359,7 @@ func skipGenerated(data []byte) (n int, err error) {
|
|||||||
if iNdEx >= l {
|
if iNdEx >= l {
|
||||||
return 0, io.ErrUnexpectedEOF
|
return 0, io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
b := data[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
wire |= (uint64(b) & 0x7F) << shift
|
wire |= (uint64(b) & 0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
@@ -1375,7 +1377,7 @@ func skipGenerated(data []byte) (n int, err error) {
|
|||||||
return 0, io.ErrUnexpectedEOF
|
return 0, io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
iNdEx++
|
iNdEx++
|
||||||
if data[iNdEx-1] < 0x80 {
|
if dAtA[iNdEx-1] < 0x80 {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -1392,7 +1394,7 @@ func skipGenerated(data []byte) (n int, err error) {
|
|||||||
if iNdEx >= l {
|
if iNdEx >= l {
|
||||||
return 0, io.ErrUnexpectedEOF
|
return 0, io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
b := data[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
length |= (int(b) & 0x7F) << shift
|
length |= (int(b) & 0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
@@ -1415,7 +1417,7 @@ func skipGenerated(data []byte) (n int, err error) {
|
|||||||
if iNdEx >= l {
|
if iNdEx >= l {
|
||||||
return 0, io.ErrUnexpectedEOF
|
return 0, io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
b := data[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
innerWire |= (uint64(b) & 0x7F) << shift
|
innerWire |= (uint64(b) & 0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
@@ -1426,7 +1428,7 @@ func skipGenerated(data []byte) (n int, err error) {
|
|||||||
if innerWireType == 4 {
|
if innerWireType == 4 {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
next, err := skipGenerated(data[start:])
|
next, err := skipGenerated(dAtA[start:])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
@@ -1450,6 +1452,10 @@ var (
|
|||||||
ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow")
|
ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow")
|
||||||
)
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
proto.RegisterFile("k8s.io/client-go/pkg/apis/batch/v2alpha1/generated.proto", fileDescriptorGenerated)
|
||||||
|
}
|
||||||
|
|
||||||
var fileDescriptorGenerated = []byte{
|
var fileDescriptorGenerated = []byte{
|
||||||
// 799 bytes of a gzipped FileDescriptorProto
|
// 799 bytes of a gzipped FileDescriptorProto
|
||||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xbc, 0x94, 0x4d, 0x4f, 0xe3, 0x46,
|
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xbc, 0x94, 0x4d, 0x4f, 0xe3, 0x46,
|
||||||
|
|||||||
File diff suppressed because it is too large
@@ -1061,17 +1061,21 @@ type NetworkPolicySpec struct {
|
|||||||
type NetworkPolicyIngressRule struct {
|
type NetworkPolicyIngressRule struct {
|
||||||
// List of ports which should be made accessible on the pods selected for this rule.
|
// List of ports which should be made accessible on the pods selected for this rule.
|
||||||
// Each item in this list is combined using a logical OR.
|
// Each item in this list is combined using a logical OR.
|
||||||
// If this field is empty or missing, this rule matches all ports (traffic not restricted by port).
|
// If this field is not provided, this rule matches all ports (traffic not restricted by port).
|
||||||
|
// If this field is empty, this rule matches no ports (no traffic matches).
|
||||||
// If this field is present and contains at least one item, then this rule allows traffic
|
// If this field is present and contains at least one item, then this rule allows traffic
|
||||||
// only if the traffic matches at least one port in the list.
|
// only if the traffic matches at least one port in the list.
|
||||||
|
// TODO: Update this to be a pointer to slice as soon as auto-generation supports it.
|
||||||
// +optional
|
// +optional
|
||||||
Ports []NetworkPolicyPort
|
Ports []NetworkPolicyPort
|
||||||
|
|
||||||
// List of sources which should be able to access the pods selected for this rule.
|
// List of sources which should be able to access the pods selected for this rule.
|
||||||
// Items in this list are combined using a logical OR operation.
|
// Items in this list are combined using a logical OR operation.
|
||||||
// If this field is empty or missing, this rule matches all sources (traffic not restricted by source).
|
// If this field is not provided, this rule matches all sources (traffic not restricted by source).
|
||||||
|
// If this field is empty, this rule matches no sources (no traffic matches).
|
||||||
// If this field is present and contains at least on item, this rule allows traffic only if the
|
// If this field is present and contains at least on item, this rule allows traffic only if the
|
||||||
// traffic matches at least one item in the from list.
|
// traffic matches at least one item in the from list.
|
||||||
|
// TODO: Update this to be a pointer to slice as soon as auto-generation supports it.
|
||||||
// +optional
|
// +optional
|
||||||
From []NetworkPolicyPeer
|
From []NetworkPolicyPeer
|
||||||
}
|
}
|
||||||
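The comment rewrites in this hunk and the matching ones further below distinguish a missing ports/from list (the rule matches everything) from a present-but-empty list (the rule matches nothing). In Go terms that is the nil-versus-empty-slice distinction. The sketch below uses a stand-in struct rather than the real API type to show how the two cases arrive differently from JSON:

package main

import (
	"encoding/json"
	"fmt"
)

// Stand-in for the ingress rule; only the slice semantics matter here.
type rule struct {
	Ports []string `json:"ports,omitempty"`
}

func describe(r rule) string {
	switch {
	case r.Ports == nil:
		return "field not provided: matches all ports"
	case len(r.Ports) == 0:
		return "field present but empty: matches no ports"
	default:
		return fmt.Sprintf("matches only %v", r.Ports)
	}
}

func main() {
	var a, b rule
	json.Unmarshal([]byte(`{}`), &a)            // ports omitted -> nil slice
	json.Unmarshal([]byte(`{"ports": []}`), &b) // ports empty   -> non-nil, len 0
	fmt.Println(describe(a))
	fmt.Println(describe(b))
}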
@@ -1096,6 +1100,7 @@ type NetworkPolicyPeer struct {
|
|||||||
|
|
||||||
// This is a label selector which selects Pods in this namespace.
|
// This is a label selector which selects Pods in this namespace.
|
||||||
// This field follows standard label selector semantics.
|
// This field follows standard label selector semantics.
|
||||||
|
// If not provided, this selector selects no pods.
|
||||||
// If present but empty, this selector selects all pods in this namespace.
|
// If present but empty, this selector selects all pods in this namespace.
|
||||||
// +optional
|
// +optional
|
||||||
PodSelector *metav1.LabelSelector
|
PodSelector *metav1.LabelSelector
|
||||||
@@ -1103,6 +1108,7 @@ type NetworkPolicyPeer struct {
|
|||||||
// Selects Namespaces using cluster scoped-labels. This
|
// Selects Namespaces using cluster scoped-labels. This
|
||||||
// matches all pods in all namespaces selected by this label selector.
|
// matches all pods in all namespaces selected by this label selector.
|
||||||
// This field follows standard label selector semantics.
|
// This field follows standard label selector semantics.
|
||||||
|
// If omitted, this selector selects no namespaces.
|
||||||
// If present but empty, this selector selects all namespaces.
|
// If present but empty, this selector selects all namespaces.
|
||||||
// +optional
|
// +optional
|
||||||
NamespaceSelector *metav1.LabelSelector
|
NamespaceSelector *metav1.LabelSelector
|
||||||
|
|||||||
@@ -127,6 +127,7 @@ func SetDefaults_ReplicaSet(obj *ReplicaSet) {
|
|||||||
func SetDefaults_NetworkPolicy(obj *NetworkPolicy) {
|
func SetDefaults_NetworkPolicy(obj *NetworkPolicy) {
|
||||||
// Default any undefined Protocol fields to TCP.
|
// Default any undefined Protocol fields to TCP.
|
||||||
for _, i := range obj.Spec.Ingress {
|
for _, i := range obj.Spec.Ingress {
|
||||||
|
// TODO: Update Ports to be a pointer to slice as soon as auto-generation supports it.
|
||||||
for _, p := range i.Ports {
|
for _, p := range i.Ports {
|
||||||
if p.Protocol == nil {
|
if p.Protocol == nil {
|
||||||
proto := v1.ProtocolTCP
|
proto := v1.ProtocolTCP
|
||||||
|
|||||||
File diff suppressed because it is too large
@@ -321,8 +321,6 @@ message DeploymentStatus {
|
|||||||
optional int32 unavailableReplicas = 5;
|
optional int32 unavailableReplicas = 5;
|
||||||
|
|
||||||
// Represents the latest available observations of a deployment's current state.
|
// Represents the latest available observations of a deployment's current state.
|
||||||
// +patchMergeKey=type
|
|
||||||
// +patchStrategy=merge
|
|
||||||
repeated DeploymentCondition conditions = 6;
|
repeated DeploymentCondition conditions = 6;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -541,17 +539,21 @@ message NetworkPolicy {
|
|||||||
message NetworkPolicyIngressRule {
|
message NetworkPolicyIngressRule {
|
||||||
// List of ports which should be made accessible on the pods selected for this rule.
|
// List of ports which should be made accessible on the pods selected for this rule.
|
||||||
// Each item in this list is combined using a logical OR.
|
// Each item in this list is combined using a logical OR.
|
||||||
// If this field is empty or missing, this rule matches all ports (traffic not restricted by port).
|
// If this field is not provided, this rule matches all ports (traffic not restricted by port).
|
||||||
|
// If this field is empty, this rule matches no ports (no traffic matches).
|
||||||
// If this field is present and contains at least one item, then this rule allows traffic
|
// If this field is present and contains at least one item, then this rule allows traffic
|
||||||
// only if the traffic matches at least one port in the list.
|
// only if the traffic matches at least one port in the list.
|
||||||
|
// TODO: Update this to be a pointer to slice as soon as auto-generation supports it.
|
||||||
// +optional
|
// +optional
|
||||||
repeated NetworkPolicyPort ports = 1;
|
repeated NetworkPolicyPort ports = 1;
|
||||||
|
|
||||||
// List of sources which should be able to access the pods selected for this rule.
|
// List of sources which should be able to access the pods selected for this rule.
|
||||||
// Items in this list are combined using a logical OR operation.
|
// Items in this list are combined using a logical OR operation.
|
||||||
// If this field is empty or missing, this rule matches all sources (traffic not restricted by source).
|
// If this field is not provided, this rule matches all sources (traffic not restricted by source).
|
||||||
|
// If this field is empty, this rule matches no sources (no traffic matches).
|
||||||
// If this field is present and contains at least on item, this rule allows traffic only if the
|
// If this field is present and contains at least on item, this rule allows traffic only if the
|
||||||
// traffic matches at least one item in the from list.
|
// traffic matches at least one item in the from list.
|
||||||
|
// TODO: Update this to be a pointer to slice as soon as auto-generation supports it.
|
||||||
// +optional
|
// +optional
|
||||||
repeated NetworkPolicyPeer from = 2;
|
repeated NetworkPolicyPeer from = 2;
|
||||||
}
|
}
|
||||||
@@ -570,6 +572,7 @@ message NetworkPolicyList {
|
|||||||
message NetworkPolicyPeer {
|
message NetworkPolicyPeer {
|
||||||
// This is a label selector which selects Pods in this namespace.
|
// This is a label selector which selects Pods in this namespace.
|
||||||
// This field follows standard label selector semantics.
|
// This field follows standard label selector semantics.
|
||||||
|
// If not provided, this selector selects no pods.
|
||||||
// If present but empty, this selector selects all pods in this namespace.
|
// If present but empty, this selector selects all pods in this namespace.
|
||||||
// +optional
|
// +optional
|
||||||
optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector podSelector = 1;
|
optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector podSelector = 1;
|
||||||
@@ -577,6 +580,7 @@ message NetworkPolicyPeer {
|
|||||||
// Selects Namespaces using cluster scoped-labels. This
|
// Selects Namespaces using cluster scoped-labels. This
|
||||||
// matches all pods in all namespaces selected by this label selector.
|
// matches all pods in all namespaces selected by this label selector.
|
||||||
// This field follows standard label selector semantics.
|
// This field follows standard label selector semantics.
|
||||||
|
// If omitted, this selector selects no namespaces.
|
||||||
// If present but empty, this selector selects all namespaces.
|
// If present but empty, this selector selects all namespaces.
|
||||||
// +optional
|
// +optional
|
||||||
optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 2;
|
optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 2;
|
||||||
@@ -814,8 +818,6 @@ message ReplicaSetStatus {
|
|||||||
|
|
||||||
// Represents the latest available observations of a replica set's current state.
|
// Represents the latest available observations of a replica set's current state.
|
||||||
// +optional
|
// +optional
|
||||||
// +patchMergeKey=type
|
|
||||||
// +patchStrategy=merge
|
|
||||||
repeated ReplicaSetCondition conditions = 6;
|
repeated ReplicaSetCondition conditions = 6;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -322,8 +322,6 @@ type DeploymentStatus struct {
|
|||||||
UnavailableReplicas int32 `json:"unavailableReplicas,omitempty" protobuf:"varint,5,opt,name=unavailableReplicas"`
|
UnavailableReplicas int32 `json:"unavailableReplicas,omitempty" protobuf:"varint,5,opt,name=unavailableReplicas"`
|
||||||
|
|
||||||
// Represents the latest available observations of a deployment's current state.
|
// Represents the latest available observations of a deployment's current state.
|
||||||
// +patchMergeKey=type
|
|
||||||
// +patchStrategy=merge
|
|
||||||
Conditions []DeploymentCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,6,rep,name=conditions"`
|
Conditions []DeploymentCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,6,rep,name=conditions"`
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -804,8 +802,6 @@ type ReplicaSetStatus struct {
|
|||||||
|
|
||||||
// Represents the latest available observations of a replica set's current state.
|
// Represents the latest available observations of a replica set's current state.
|
||||||
// +optional
|
// +optional
|
||||||
// +patchMergeKey=type
|
|
||||||
// +patchStrategy=merge
|
|
||||||
Conditions []ReplicaSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,6,rep,name=conditions"`
|
Conditions []ReplicaSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,6,rep,name=conditions"`
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1085,17 +1081,21 @@ type NetworkPolicySpec struct {
|
|||||||
type NetworkPolicyIngressRule struct {
|
type NetworkPolicyIngressRule struct {
|
||||||
// List of ports which should be made accessible on the pods selected for this rule.
|
// List of ports which should be made accessible on the pods selected for this rule.
|
||||||
// Each item in this list is combined using a logical OR.
|
// Each item in this list is combined using a logical OR.
|
||||||
// If this field is empty or missing, this rule matches all ports (traffic not restricted by port).
|
// If this field is not provided, this rule matches all ports (traffic not restricted by port).
|
||||||
|
// If this field is empty, this rule matches no ports (no traffic matches).
|
||||||
// If this field is present and contains at least one item, then this rule allows traffic
|
// If this field is present and contains at least one item, then this rule allows traffic
|
||||||
// only if the traffic matches at least one port in the list.
|
// only if the traffic matches at least one port in the list.
|
||||||
|
// TODO: Update this to be a pointer to slice as soon as auto-generation supports it.
|
||||||
// +optional
|
// +optional
|
||||||
Ports []NetworkPolicyPort `json:"ports,omitempty" protobuf:"bytes,1,rep,name=ports"`
|
Ports []NetworkPolicyPort `json:"ports,omitempty" protobuf:"bytes,1,rep,name=ports"`
|
||||||
|
|
||||||
// List of sources which should be able to access the pods selected for this rule.
|
// List of sources which should be able to access the pods selected for this rule.
|
||||||
// Items in this list are combined using a logical OR operation.
|
// Items in this list are combined using a logical OR operation.
|
||||||
// If this field is empty or missing, this rule matches all sources (traffic not restricted by source).
|
// If this field is not provided, this rule matches all sources (traffic not restricted by source).
|
||||||
|
// If this field is empty, this rule matches no sources (no traffic matches).
|
||||||
// If this field is present and contains at least on item, this rule allows traffic only if the
|
// If this field is present and contains at least on item, this rule allows traffic only if the
|
||||||
// traffic matches at least one item in the from list.
|
// traffic matches at least one item in the from list.
|
||||||
|
// TODO: Update this to be a pointer to slice as soon as auto-generation supports it.
|
||||||
// +optional
|
// +optional
|
||||||
From []NetworkPolicyPeer `json:"from,omitempty" protobuf:"bytes,2,rep,name=from"`
|
From []NetworkPolicyPeer `json:"from,omitempty" protobuf:"bytes,2,rep,name=from"`
|
||||||
}
|
}
|
||||||
@@ -1120,6 +1120,7 @@ type NetworkPolicyPeer struct {
|
|||||||
|
|
||||||
// This is a label selector which selects Pods in this namespace.
|
// This is a label selector which selects Pods in this namespace.
|
||||||
// This field follows standard label selector semantics.
|
// This field follows standard label selector semantics.
|
||||||
|
// If not provided, this selector selects no pods.
|
||||||
// If present but empty, this selector selects all pods in this namespace.
|
// If present but empty, this selector selects all pods in this namespace.
|
||||||
// +optional
|
// +optional
|
||||||
PodSelector *metav1.LabelSelector `json:"podSelector,omitempty" protobuf:"bytes,1,opt,name=podSelector"`
|
PodSelector *metav1.LabelSelector `json:"podSelector,omitempty" protobuf:"bytes,1,opt,name=podSelector"`
|
||||||
@@ -1127,6 +1128,7 @@ type NetworkPolicyPeer struct {
|
|||||||
// Selects Namespaces using cluster scoped-labels. This
|
// Selects Namespaces using cluster scoped-labels. This
|
||||||
// matches all pods in all namespaces selected by this label selector.
|
// matches all pods in all namespaces selected by this label selector.
|
||||||
// This field follows standard label selector semantics.
|
// This field follows standard label selector semantics.
|
||||||
|
// If omitted, this selector selects no namespaces.
|
||||||
// If present but empty, this selector selects all namespaces.
|
// If present but empty, this selector selects all namespaces.
|
||||||
// +optional
|
// +optional
|
||||||
NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty" protobuf:"bytes,2,opt,name=namespaceSelector"`
|
NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty" protobuf:"bytes,2,opt,name=namespaceSelector"`
|
||||||
|
|||||||
@@ -340,8 +340,8 @@ func (NetworkPolicy) SwaggerDoc() map[string]string {
 
 var map_NetworkPolicyIngressRule = map[string]string{
 	"": "This NetworkPolicyIngressRule matches traffic if and only if the traffic matches both ports AND from.",
-	"ports": "List of ports which should be made accessible on the pods selected for this rule. Each item in this list is combined using a logical OR. If this field is empty or missing, this rule matches all ports (traffic not restricted by port). If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list.",
-	"from": "List of sources which should be able to access the pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all sources (traffic not restricted by source). If this field is present and contains at least on item, this rule allows traffic only if the traffic matches at least one item in the from list.",
+	"ports": "List of ports which should be made accessible on the pods selected for this rule. Each item in this list is combined using a logical OR. If this field is not provided, this rule matches all ports (traffic not restricted by port). If this field is empty, this rule matches no ports (no traffic matches). If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list.",
+	"from": "List of sources which should be able to access the pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is not provided, this rule matches all sources (traffic not restricted by source). If this field is empty, this rule matches no sources (no traffic matches). If this field is present and contains at least on item, this rule allows traffic only if the traffic matches at least one item in the from list.",
 }
 
 func (NetworkPolicyIngressRule) SwaggerDoc() map[string]string {
@@ -359,8 +359,8 @@ func (NetworkPolicyList) SwaggerDoc() map[string]string {
 }
 
 var map_NetworkPolicyPeer = map[string]string{
-	"podSelector": "This is a label selector which selects Pods in this namespace. This field follows standard label selector semantics. If present but empty, this selector selects all pods in this namespace.",
-	"namespaceSelector": "Selects Namespaces using cluster scoped-labels. This matches all pods in all namespaces selected by this label selector. This field follows standard label selector semantics. If present but empty, this selector selects all namespaces.",
+	"podSelector": "This is a label selector which selects Pods in this namespace. This field follows standard label selector semantics. If not provided, this selector selects no pods. If present but empty, this selector selects all pods in this namespace.",
+	"namespaceSelector": "Selects Namespaces using cluster scoped-labels. This matches all pods in all namespaces selected by this label selector. This field follows standard label selector semantics. If omitted, this selector selects no namespaces. If present but empty, this selector selects all namespaces.",
 }
 
 func (NetworkPolicyPeer) SwaggerDoc() map[string]string {
@@ -52,7 +52,9 @@ var _ = math.Inf
 
 // This is a compile-time assertion to ensure that this generated file
 // is compatible with the proto package it is being compiled against.
-const _ = proto.GoGoProtoPackageIsVersion1
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
 
 func (m *Eviction) Reset()      { *m = Eviction{} }
 func (*Eviction) ProtoMessage() {}
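The replaced constant is a compile-time guard: assigning a constant from the proto package to the blank identifier makes the build fail whenever the vendored proto package predates the generator that produced this file. A minimal, self-contained illustration of the pattern; the constant here is declared locally rather than imported from gogo/protobuf, so nothing in it should be read as that package's API.

package main

// In the real generated code this constant lives in the proto package; it is
// declared locally here only to keep the example self-contained.
const GoGoProtoPackageIsVersion2 = true

// If the imported proto package were too old, this line would fail to compile,
// which is exactly the "please upgrade the proto package" signal.
const _ = GoGoProtoPackageIsVersion2

func main() {}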
@@ -83,34 +85,34 @@ func init() {
 	proto.RegisterType((*PodDisruptionBudgetSpec)(nil), "k8s.io.client-go.pkg.apis.policy.v1beta1.PodDisruptionBudgetSpec")
 	proto.RegisterType((*PodDisruptionBudgetStatus)(nil), "k8s.io.client-go.pkg.apis.policy.v1beta1.PodDisruptionBudgetStatus")
 }
-func (m *Eviction) Marshal() (data []byte, err error) {
+func (m *Eviction) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
-	data = make([]byte, size)
-	n, err := m.MarshalTo(data)
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
 	if err != nil {
 		return nil, err
 	}
-	return data[:n], nil
+	return dAtA[:n], nil
 }
 
-func (m *Eviction) MarshalTo(data []byte) (int, error) {
+func (m *Eviction) MarshalTo(dAtA []byte) (int, error) {
 	var i int
 	_ = i
 	var l int
 	_ = l
-	data[i] = 0xa
+	dAtA[i] = 0xa
 	i++
-	i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size()))
-	n1, err := m.ObjectMeta.MarshalTo(data[i:])
+	i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size()))
+	n1, err := m.ObjectMeta.MarshalTo(dAtA[i:])
 	if err != nil {
 		return 0, err
 	}
 	i += n1
 	if m.DeleteOptions != nil {
-		data[i] = 0x12
+		dAtA[i] = 0x12
 		i++
-		i = encodeVarintGenerated(data, i, uint64(m.DeleteOptions.Size()))
-		n2, err := m.DeleteOptions.MarshalTo(data[i:])
+		i = encodeVarintGenerated(dAtA, i, uint64(m.DeleteOptions.Size()))
+		n2, err := m.DeleteOptions.MarshalTo(dAtA[i:])
 		if err != nil {
 			return 0, err
 		}
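The generated Marshal wrapper always allocates exactly Size() bytes and then delegates to MarshalTo, so no buffer growth happens during encoding. A small sketch of that calling pattern; it uses a toy message type rather than the Eviction type above, and only assumes the Size()/MarshalTo method set that the generated code itself shows.

package main

import "fmt"

// gogoMarshaler is the method set the generated types above expose.
type gogoMarshaler interface {
	Size() int
	MarshalTo(dAtA []byte) (int, error)
}

// marshal mirrors the generated Marshal() wrapper: allocate Size() bytes once,
// then let MarshalTo write into the front of the buffer.
func marshal(m gogoMarshaler) ([]byte, error) {
	buf := make([]byte, m.Size())
	n, err := m.MarshalTo(buf)
	if err != nil {
		return nil, err
	}
	return buf[:n], nil
}

// fixed is a toy message whose wire form is just its three bytes.
type fixed struct{ payload [3]byte }

func (f *fixed) Size() int { return len(f.payload) }
func (f *fixed) MarshalTo(dAtA []byte) (int, error) {
	return copy(dAtA, f.payload[:]), nil
}

func main() {
	b, err := marshal(&fixed{payload: [3]byte{0x0a, 0x01, 0x78}})
	fmt.Println(b, err) // [10 1 120] <nil>
}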
[Hunks @@ -119,41 +121,41 @@, @@ -161,35 +163,35 @@ and @@ -199,34 +201,34 @@ apply the same change to the generated marshalling code for PodDisruptionBudget, PodDisruptionBudgetList and PodDisruptionBudgetSpec: the buffer parameter `data` is renamed to `dAtA` in each Marshal and MarshalTo method (signatures, make, index writes, encodeVarintGenerated calls and nested MarshalTo calls); the emitted wire format is unchanged.]
@@ -235,86 +237,90 @@ func (m *PodDisruptionBudgetSpec) MarshalTo(data []byte) (int, error) {
 	return i, nil
 }
 
-func (m *PodDisruptionBudgetStatus) Marshal() (data []byte, err error) {
+func (m *PodDisruptionBudgetStatus) Marshal() (dAtA []byte, err error) {
[... the Marshal wrapper, the MarshalTo(dAtA []byte) signature and the ObservedGeneration varint write carry the same data → dAtA rename as the methods above ...]
 	if len(m.DisruptedPods) > 0 {
 		for k := range m.DisruptedPods {
-			data[i] = 0x12
+			dAtA[i] = 0x12
 			i++
 			v := m.DisruptedPods[k]
-			msgSize := (&v).Size()
-			mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize))
-			i = encodeVarintGenerated(data, i, uint64(mapSize))
-			data[i] = 0xa
+			msgSize := 0
+			if (&v) != nil {
+				msgSize = (&v).Size()
+				msgSize += 1 + sovGenerated(uint64(msgSize))
+			}
+			mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize
+			i = encodeVarintGenerated(dAtA, i, uint64(mapSize))
+			dAtA[i] = 0xa
 			i++
-			i = encodeVarintGenerated(data, i, uint64(len(k)))
-			i += copy(data[i:], k)
-			data[i] = 0x12
+			i = encodeVarintGenerated(dAtA, i, uint64(len(k)))
+			i += copy(dAtA[i:], k)
+			dAtA[i] = 0x12
 			i++
-			i = encodeVarintGenerated(data, i, uint64((&v).Size()))
-			n9, err := (&v).MarshalTo(data[i:])
+			i = encodeVarintGenerated(dAtA, i, uint64((&v).Size()))
+			n9, err := (&v).MarshalTo(dAtA[i:])
 			if err != nil {
 				return 0, err
 			}
 			i += n9
 		}
 	}
[... the PodDisruptionsAllowed, CurrentHealthy, DesiredHealthy and ExpectedPods varint writes and the encodeFixed64Generated / encodeFixed32Generated helpers carry the same data → dAtA rename ...]
-func encodeVarintGenerated(data []byte, offset int, v uint64) int {
+func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
 	for v >= 1<<7 {
-		data[offset] = uint8(v&0x7f | 0x80)
+		dAtA[offset] = uint8(v&0x7f | 0x80)
 		v >>= 7
 		offset++
 	}
-	data[offset] = uint8(v)
+	dAtA[offset] = uint8(v)
 	return offset + 1
 }
 func (m *Eviction) Size() (n int) {
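encodeVarintGenerated writes the standard protobuf base-128 varint: seven payload bits per byte, continuation bit set on every byte except the last. A self-contained round-trip sketch of that encoding; the decoder is added here only for the demonstration and is not part of the diff.

package main

import "fmt"

// putVarint mirrors the generated encoder: low 7 bits per byte, 0x80 set while
// more bytes follow.
func putVarint(buf []byte, offset int, v uint64) int {
	for v >= 1<<7 {
		buf[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	buf[offset] = uint8(v)
	return offset + 1
}

// getVarint reverses putVarint, returning the value and the number of bytes read.
func getVarint(buf []byte) (uint64, int) {
	var v uint64
	for i, shift := 0, uint(0); ; i, shift = i+1, shift+7 {
		b := buf[i]
		v |= uint64(b&0x7f) << shift
		if b < 0x80 {
			return v, i + 1
		}
	}
}

func main() {
	buf := make([]byte, 10)
	n := putVarint(buf, 0, 300)
	v, _ := getVarint(buf[:n])
	fmt.Printf("% x -> %d\n", buf[:n], v) // ac 02 -> 300
}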
@@ -478,8 +484,8 @@ func valueToStringGenerated(v interface{}) string {
 	pv := reflect.Indirect(rv).Interface()
 	return fmt.Sprintf("*%v", pv)
 }
-func (m *Eviction) Unmarshal(data []byte) error {
-	l := len(data)
+func (m *Eviction) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
 	iNdEx := 0
 	for iNdEx < l {
 		preIndex := iNdEx
[Hunks @@ -491,7 +497,7 @@ through @@ -1052,7 +1058,7 @@ regenerate the Unmarshal methods for Eviction, PodDisruptionBudget, PodDisruptionBudgetList, PodDisruptionBudgetSpec and the leading fields of PodDisruptionBudgetStatus (ObservedGeneration and the DisruptedPods key read) with the same mechanical rename: every read of the input buffer (`b := data[iNdEx]`, `m.X.Unmarshal(data[iNdEx:postIndex])`, `skipGenerated(data[iNdEx:])`, `l := len(data)`, `mapkey := string(data[...])`) now uses `dAtA`; the parsing logic is unchanged.]
@@ -1067,57 +1073,62 @@ func (m *PodDisruptionBudgetStatus) Unmarshal(data []byte) error {
 			if postStringIndexmapkey > l {
 				return io.ErrUnexpectedEOF
 			}
-			mapkey := string(data[iNdEx:postStringIndexmapkey])
+			mapkey := string(dAtA[iNdEx:postStringIndexmapkey])
 			iNdEx = postStringIndexmapkey
-			var valuekey uint64
[... the varint reads for valuekey and mapmsglen, their bounds checks, and the unconditional mapvalue := &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} decode are removed from this position ...]
 			if m.DisruptedPods == nil {
 				m.DisruptedPods = make(map[string]k8s_io_apimachinery_pkg_apis_meta_v1.Time)
 			}
-			m.DisruptedPods[mapkey] = *mapvalue
+			if iNdEx < postIndex {
+				var valuekey uint64
[... the same valuekey / mapmsglen varint reads and bounds checks, now reading from dAtA and indented one level deeper inside the guard ...]
+				mapvalue := &k8s_io_apimachinery_pkg_apis_meta_v1.Time{}
+				if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
+					return err
+				}
+				iNdEx = postmsgIndex
+				m.DisruptedPods[mapkey] = *mapvalue
+			} else {
+				var mapvalue k8s_io_apimachinery_pkg_apis_meta_v1.Time
+				m.DisruptedPods[mapkey] = mapvalue
+			}
 			iNdEx = postIndex
 		case 3:
 			if wireType != 0 {
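The regenerated map decoding only parses a value when bytes remain in the entry (iNdEx < postIndex): a proto3 map entry may legally omit its value field, and the key must then be stored with the value type's zero value. A toy sketch of that rule, using made-up types rather than the generated ones.

package main

import "fmt"

// entry stands in for one decoded map_entry message; a nil value means the
// value field was absent on the wire.
type entry struct {
	key   string
	value *string
}

// decodeMap applies the rule the hunk above implements: present values are
// stored as decoded, absent values become the zero value under the same key.
func decodeMap(entries []entry) map[string]string {
	out := make(map[string]string)
	for _, e := range entries {
		if e.value != nil {
			out[e.key] = *e.value // value field present: keep the decoded value
		} else {
			var zero string
			out[e.key] = zero // value field absent: keep the key with a zero value
		}
	}
	return out
}

func main() {
	v := "2017-04-17T00:00:00Z"
	m := decodeMap([]entry{{key: "pod-a", value: &v}, {key: "pod-b"}})
	fmt.Println(m) // map[pod-a:2017-04-17T00:00:00Z pod-b:]
}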
[Hunks @@ -1131,7 +1142,7 @@ through @@ -1297,7 +1308,7 @@ regenerate the remaining PodDisruptionBudgetStatus fields (PodDisruptionsAllowed, CurrentHealthy, DesiredHealthy, ExpectedPods), the default/skipGenerated branches, and the skipGenerated helper itself with the same `data` → `dAtA` rename; no behavioural change.]
@@ -1321,6 +1332,10 @@ var (
 	ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow")
 )
 
+func init() {
+	proto.RegisterFile("k8s.io/client-go/pkg/apis/policy/v1beta1/generated.proto", fileDescriptorGenerated)
+}
+
 var fileDescriptorGenerated = []byte{
 	// 773 bytes of a gzipped FileDescriptorProto
 	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xbc, 0x94, 0xcb, 0x6e, 0xf3, 0x44,
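The new init() registers the file's gzipped descriptor under its .proto import path when the package is first imported. A self-contained sketch of that registration pattern; the registry map and helper names below are made up and stand in for the proto package's global registry.

package main

import "fmt"

// fileDescriptors stands in for the proto package's global descriptor registry.
var fileDescriptors = map[string][]byte{}

func registerFile(name string, descriptor []byte) {
	fileDescriptors[name] = descriptor
}

// A stand-in for the generated gzipped FileDescriptorProto bytes.
var fileDescriptorGenerated = []byte{0x1f, 0x8b}

// Registration happens at import time, so reflection-based consumers can look
// the descriptor up later without calling anything in this package explicitly.
func init() {
	registerFile("k8s.io/client-go/pkg/apis/policy/v1beta1/generated.proto", fileDescriptorGenerated)
}

func main() {
	fmt.Println(len(fileDescriptors["k8s.io/client-go/pkg/apis/policy/v1beta1/generated.proto"])) // 2
}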
File diff suppressed because it is too large
@@ -49,7 +49,9 @@ var _ = math.Inf
 
 // This is a compile-time assertion to ensure that this generated file
 // is compatible with the proto package it is being compiled against.
-const _ = proto.GoGoProtoPackageIsVersion1
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
 
 func (m *PodPreset) Reset()      { *m = PodPreset{} }
 func (*PodPreset) ProtoMessage() {}
[Hunks @@ -68,33 +70,33 @@ through @@ -214,31 +216,31 @@ regenerate the Marshal/MarshalTo methods for PodPreset, PodPresetList and PodPresetSpec (including the Selector, Env, EnvFrom, Volumes and VolumeMounts fields) and the encodeFixed64Generated / encodeFixed32Generated / encodeVarintGenerated helpers with the same `data` → `dAtA` rename already shown for the policy package above; the encoded output is unchanged.]
@@ -354,8 +356,8 @@ func valueToStringGenerated(v interface{}) string {
 	pv := reflect.Indirect(rv).Interface()
 	return fmt.Sprintf("*%v", pv)
 }
-func (m *PodPreset) Unmarshal(data []byte) error {
-	l := len(data)
+func (m *PodPreset) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
 	iNdEx := 0
 	for iNdEx < l {
 		preIndex := iNdEx
[Hunks @@ -367,7 +369,7 @@ through @@ -708,7 +710,7 @@ regenerate the Unmarshal methods for PodPreset, PodPresetList and PodPresetSpec (ObjectMeta, Spec, ListMeta, Items, Selector, Env, EnvFrom, Volumes and VolumeMounts fields) with the same `data` → `dAtA` rename; the parsing logic is unchanged.]
@@ -723,7 +725,7 @@ func (m *PodPresetSpec) Unmarshal(data []byte) error {
|
|||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
m.Volumes = append(m.Volumes, k8s_io_kubernetes_pkg_api_v1.Volume{})
|
m.Volumes = append(m.Volumes, k8s_io_kubernetes_pkg_api_v1.Volume{})
|
||||||
if err := m.Volumes[len(m.Volumes)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
|
if err := m.Volumes[len(m.Volumes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
iNdEx = postIndex
|
iNdEx = postIndex
|
||||||
@@ -739,7 +741,7 @@ func (m *PodPresetSpec) Unmarshal(data []byte) error {
|
|||||||
if iNdEx >= l {
|
if iNdEx >= l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
b := data[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
msglen |= (int(b) & 0x7F) << shift
|
msglen |= (int(b) & 0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
@@ -754,13 +756,13 @@ func (m *PodPresetSpec) Unmarshal(data []byte) error {
|
|||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
m.VolumeMounts = append(m.VolumeMounts, k8s_io_kubernetes_pkg_api_v1.VolumeMount{})
|
m.VolumeMounts = append(m.VolumeMounts, k8s_io_kubernetes_pkg_api_v1.VolumeMount{})
|
||||||
if err := m.VolumeMounts[len(m.VolumeMounts)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
|
if err := m.VolumeMounts[len(m.VolumeMounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
iNdEx = postIndex
|
iNdEx = postIndex
|
||||||
default:
|
default:
|
||||||
iNdEx = preIndex
|
iNdEx = preIndex
|
||||||
skippy, err := skipGenerated(data[iNdEx:])
|
skippy, err := skipGenerated(dAtA[iNdEx:])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -779,8 +781,8 @@ func (m *PodPresetSpec) Unmarshal(data []byte) error {
|
|||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
func skipGenerated(data []byte) (n int, err error) {
|
func skipGenerated(dAtA []byte) (n int, err error) {
|
||||||
l := len(data)
|
l := len(dAtA)
|
||||||
iNdEx := 0
|
iNdEx := 0
|
||||||
for iNdEx < l {
|
for iNdEx < l {
|
||||||
var wire uint64
|
var wire uint64
|
||||||
@@ -791,7 +793,7 @@ func skipGenerated(data []byte) (n int, err error) {
|
|||||||
if iNdEx >= l {
|
if iNdEx >= l {
|
||||||
return 0, io.ErrUnexpectedEOF
|
return 0, io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
b := data[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
wire |= (uint64(b) & 0x7F) << shift
|
wire |= (uint64(b) & 0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
@@ -809,7 +811,7 @@ func skipGenerated(data []byte) (n int, err error) {
|
|||||||
return 0, io.ErrUnexpectedEOF
|
return 0, io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
iNdEx++
|
iNdEx++
|
||||||
if data[iNdEx-1] < 0x80 {
|
if dAtA[iNdEx-1] < 0x80 {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -826,7 +828,7 @@ func skipGenerated(data []byte) (n int, err error) {
|
|||||||
if iNdEx >= l {
|
if iNdEx >= l {
|
||||||
return 0, io.ErrUnexpectedEOF
|
return 0, io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
b := data[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
length |= (int(b) & 0x7F) << shift
|
length |= (int(b) & 0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
@@ -849,7 +851,7 @@ func skipGenerated(data []byte) (n int, err error) {
|
|||||||
if iNdEx >= l {
|
if iNdEx >= l {
|
||||||
return 0, io.ErrUnexpectedEOF
|
return 0, io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
b := data[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
innerWire |= (uint64(b) & 0x7F) << shift
|
innerWire |= (uint64(b) & 0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
@@ -860,7 +862,7 @@ func skipGenerated(data []byte) (n int, err error) {
|
|||||||
if innerWireType == 4 {
|
if innerWireType == 4 {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
next, err := skipGenerated(data[start:])
|
next, err := skipGenerated(dAtA[start:])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
@@ -884,6 +886,10 @@ var (
|
|||||||
ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow")
|
ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow")
|
||||||
)
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
proto.RegisterFile("k8s.io/client-go/pkg/apis/settings/v1alpha1/generated.proto", fileDescriptorGenerated)
|
||||||
|
}
|
||||||
|
|
||||||
var fileDescriptorGenerated = []byte{
|
var fileDescriptorGenerated = []byte{
|
||||||
// 550 bytes of a gzipped FileDescriptorProto
|
// 550 bytes of a gzipped FileDescriptorProto
|
||||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x9c, 0x92, 0x41, 0x8b, 0xd3, 0x40,
|
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x9c, 0x92, 0x41, 0x8b, 0xd3, 0x40,
|
||||||
|
|||||||
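The Unmarshal and skipGenerated functions summarized above all inline the same base-128 varint read loop (`wire |= (uint64(b) & 0x7F) << shift`, stopping once `b < 0x80`). A minimal, self-contained sketch of that decoding pattern, with illustrative names rather than the generated identifiers:

```go
package main

import (
	"errors"
	"fmt"
)

// decodeUvarint reads a protobuf base-128 varint from dAtA starting at iNdEx,
// mirroring the loop the generated Unmarshal functions inline: each byte
// contributes its low 7 bits, and a byte below 0x80 ends the value.
func decodeUvarint(dAtA []byte, iNdEx int) (v uint64, next int, err error) {
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errors.New("integer overflow")
		}
		if iNdEx >= len(dAtA) {
			return 0, 0, errors.New("unexpected EOF")
		}
		b := dAtA[iNdEx]
		iNdEx++
		v |= (uint64(b) & 0x7F) << shift
		if b < 0x80 {
			break
		}
	}
	return v, iNdEx, nil
}

func main() {
	// 300 is encoded as 0xAC 0x02 on the wire.
	v, next, err := decodeUvarint([]byte{0xAC, 0x02}, 0)
	fmt.Println(v, next, err) // 300 2 <nil>
}
```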
[Regenerated gogo/protobuf marshaling code for k8s.io/client-go/pkg/apis/storage/v1 (StorageClass, StorageClassList). The hunks are again generator output:
- the compile-time assertion changes from proto.GoGoProtoPackageIsVersion1 to proto.GoGoProtoPackageIsVersion2, with a new comment that a compilation error at that line likely means the proto package needs to be updated;
- `data` is renamed to `dAtA` in Marshal, MarshalTo, encodeFixed64Generated, encodeFixed32Generated, encodeVarintGenerated, Unmarshal, and skipGenerated;
- the StorageClass.Parameters map decoder is restructured (@@ -411,56 +413,61): the value key and value string are now read only while `iNdEx < postIndex`, and an entry whose value field is absent stores the empty string for its key instead of unconditionally reading a value;
- an init() is added that calls proto.RegisterFile("k8s.io/client-go/pkg/apis/storage/v1/generated.proto", fileDescriptorGenerated); the RegisterType calls for "k8s.io.client-go.pkg.apis.storage.v1" and the 474-byte gzipped fileDescriptorGenerated are unchanged.]
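The Parameters change is the one behavioral difference in these regenerated files: the old code always read a value key and value string for each map entry, while the new code does so only when bytes remain before the entry's end offset, and otherwise stores an empty value. A rough sketch of that shape, using a deliberately simplified entry layout (one-byte length prefixes instead of real protobuf field tags), purely for illustration:

```go
package main

import (
	"errors"
	"fmt"
)

// decodeMapEntry sketches the shape of the regenerated map decoding: the key is
// always read, but a value is only decoded when bytes remain before the entry's
// end offset; otherwise the key maps to the empty string. The entry layout here
// (one-byte length prefixes, no field tags) is a simplification for the sketch,
// not the real protobuf wire format.
func decodeMapEntry(entry []byte, params map[string]string) error {
	if len(entry) == 0 {
		return errors.New("empty map entry")
	}
	keyLen := int(entry[0])
	if 1+keyLen > len(entry) {
		return errors.New("unexpected EOF")
	}
	key := string(entry[1 : 1+keyLen])
	iNdEx, postIndex := 1+keyLen, len(entry)
	if iNdEx < postIndex {
		// A value field is present: read its length and bytes.
		valLen := int(entry[iNdEx])
		iNdEx++
		if iNdEx+valLen > postIndex {
			return errors.New("unexpected EOF")
		}
		params[key] = string(entry[iNdEx : iNdEx+valLen])
	} else {
		// No value bytes: store the zero value, as the regenerated code now does.
		params[key] = ""
	}
	return nil
}

func main() {
	params := map[string]string{}
	_ = decodeMapEntry([]byte{4, 't', 'y', 'p', 'e', 3, 'g', 'p', '2'}, params)
	_ = decodeMapEntry([]byte{4, 'z', 'o', 'n', 'e'}, params) // value omitted
	fmt.Println(params) // map[type:gp2 zone:]
}
```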
[Regenerated gogo/protobuf marshaling code for k8s.io/client-go/pkg/apis/storage/v1beta1. The hunks mirror the storage/v1 file above line for line: GoGoProtoPackageIsVersion1 becomes GoGoProtoPackageIsVersion2, `data` becomes `dAtA` across the Marshal/Unmarshal/encode/skip helpers, the Parameters map decoder gains the same guarded value read, and an init() is added that calls proto.RegisterFile("k8s.io/client-go/pkg/apis/storage/v1beta1/generated.proto", fileDescriptorGenerated); the 486-byte gzipped fileDescriptorGenerated is unchanged.]
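On the encode side, the varint writer these files share (encodeVarintGenerated) is small enough to reproduce as a standalone sketch; the function body below is taken from the diff, while the package wrapper and demo around it are illustrative:

```go
package main

import "fmt"

// encodeUvarint mirrors the encodeVarintGenerated helper shown in the diff:
// while more than 7 bits remain, write the low 7 bits with the continuation
// bit (0x80) set, then write the final byte as-is and return the next offset.
func encodeUvarint(dAtA []byte, offset int, v uint64) int {
	for v >= 1<<7 {
		dAtA[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	dAtA[offset] = uint8(v)
	return offset + 1
}

func main() {
	buf := make([]byte, 10)
	n := encodeUvarint(buf, 0, 300)
	fmt.Printf("% x (len %d)\n", buf[:n], n) // ac 02 (len 2)
}
```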
@@ -609,7 +609,7 @@ func (r *Request) URL() *url.URL {
 // finalURLTemplate is similar to URL(), but will make all specific parameter values equal
 // - instead of name or namespace, "{name}" and "{namespace}" will be used, and all query
 // parameters will be reset. This creates a copy of the request so as not to change the
-// underlying object. This means some useful request info (like the types of field
+// underyling object. This means some useful request info (like the types of field
 // selectors in use) will be lost.
 // TODO: preserve field selector keys
 func (r Request) finalURLTemplate() url.URL {
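The comment above describes the intent of finalURLTemplate: substitute `{name}` and `{namespace}` for the concrete path segments and reset the query parameters so that many concrete request URLs collapse into one template (useful, for example, when grouping metrics). A hypothetical illustration of that idea, not the client-go implementation:

```go
package main

import (
	"fmt"
	"strings"
)

// templatePath is a hypothetical sketch of the idea in the comment above, not
// the client-go implementation: replace the concrete namespace and name path
// segments with "{namespace}" and "{name}" so many concrete request paths
// collapse into one template (the real method also resets query parameters).
func templatePath(path, namespace, name string) string {
	segments := strings.Split(path, "/")
	for i, s := range segments {
		switch s {
		case namespace:
			segments[i] = "{namespace}"
		case name:
			segments[i] = "{name}"
		}
	}
	return strings.Join(segments, "/")
}

func main() {
	fmt.Println(templatePath("/api/v1/namespaces/kube-system/pods/etcd-0", "kube-system", "etcd-0"))
	// /api/v1/namespaces/{namespace}/pods/{name}
}
```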
@@ -174,12 +174,12 @@ func (f *FakeControllerSource) List(options metav1.ListOptions) (runtime.Object,
 	if err := meta.SetList(listObj, list); err != nil {
 		return nil, err
 	}
-	listAccessor, err := meta.ListAccessor(listObj)
+	objMeta, err := metav1.ListMetaFor(listObj)
 	if err != nil {
 		return nil, err
 	}
 	resourceVersion := len(f.changes)
-	listAccessor.SetResourceVersion(strconv.Itoa(resourceVersion))
+	objMeta.ResourceVersion = strconv.Itoa(resourceVersion)
 	return listObj, nil
 }

@@ -195,12 +195,12 @@ func (f *FakePVControllerSource) List(options metav1.ListOptions) (runtime.Objec
 	if err := meta.SetList(listObj, list); err != nil {
 		return nil, err
 	}
-	listAccessor, err := meta.ListAccessor(listObj)
+	objMeta, err := metav1.ListMetaFor(listObj)
 	if err != nil {
 		return nil, err
 	}
 	resourceVersion := len(f.changes)
-	listAccessor.SetResourceVersion(strconv.Itoa(resourceVersion))
+	objMeta.ResourceVersion = strconv.Itoa(resourceVersion)
 	return listObj, nil
 }

@@ -216,12 +216,12 @@ func (f *FakePVCControllerSource) List(options metav1.ListOptions) (runtime.Obje
 	if err := meta.SetList(listObj, list); err != nil {
 		return nil, err
 	}
-	listAccessor, err := meta.ListAccessor(listObj)
+	objMeta, err := metav1.ListMetaFor(listObj)
 	if err != nil {
 		return nil, err
 	}
 	resourceVersion := len(f.changes)
-	listAccessor.SetResourceVersion(strconv.Itoa(resourceVersion))
+	objMeta.ResourceVersion = strconv.Itoa(resourceVersion)
 	return listObj, nil
 }
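All three fake sources above stamp the returned list's resource version from the length of their recorded change log. A minimal sketch of that bookkeeping with hypothetical stand-in types (the real code works on runtime.Object lists and apimachinery list accessors):

```go
package main

import (
	"fmt"
	"strconv"
)

// fakeSource is a hypothetical stand-in for the fake controller sources above:
// it records every change and uses the length of that log as the resource
// version, so each List reports how far the change log has advanced.
type fakeSource struct {
	changes []string // recorded change events, simplified to strings
}

type listResult struct {
	ResourceVersion string
	Items           []string
}

func (f *fakeSource) List() listResult {
	rv := strconv.Itoa(len(f.changes)) // same bookkeeping as in the diff above
	return listResult{ResourceVersion: rv, Items: append([]string(nil), f.changes...)}
}

func main() {
	f := &fakeSource{changes: []string{"add pod-a", "add pod-b"}}
	fmt.Println(f.List().ResourceVersion) // "2"
}
```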
704
vendor/BUILD
vendored
704
vendor/BUILD
vendored
@@ -1107,6 +1107,35 @@ go_library(
|
|||||||
tags = ["automanaged"],
|
tags = ["automanaged"],
|
||||||
)
|
)
|
||||||
|
|
||||||
|
go_library(
|
||||||
|
name = "github.com/codedellemc/goscaleio",
|
||||||
|
srcs = [
|
||||||
|
"github.com/codedellemc/goscaleio/api.go",
|
||||||
|
"github.com/codedellemc/goscaleio/certs.go",
|
||||||
|
"github.com/codedellemc/goscaleio/device.go",
|
||||||
|
"github.com/codedellemc/goscaleio/instance.go",
|
||||||
|
"github.com/codedellemc/goscaleio/protectiondomain.go",
|
||||||
|
"github.com/codedellemc/goscaleio/scsiinitiator.go",
|
||||||
|
"github.com/codedellemc/goscaleio/sdc.go",
|
||||||
|
"github.com/codedellemc/goscaleio/sds.go",
|
||||||
|
"github.com/codedellemc/goscaleio/storagepool.go",
|
||||||
|
"github.com/codedellemc/goscaleio/system.go",
|
||||||
|
"github.com/codedellemc/goscaleio/user.go",
|
||||||
|
"github.com/codedellemc/goscaleio/volume.go",
|
||||||
|
],
|
||||||
|
tags = ["automanaged"],
|
||||||
|
deps = [
|
||||||
|
"//vendor:github.com/Sirupsen/logrus",
|
||||||
|
"//vendor:github.com/codedellemc/goscaleio/types/v1",
|
||||||
|
],
|
||||||
|
)
|
||||||
|
|
||||||
|
go_library(
|
||||||
|
name = "github.com/codedellemc/goscaleio/types/v1",
|
||||||
|
srcs = ["github.com/codedellemc/goscaleio/types/v1/types.go"],
|
||||||
|
tags = ["automanaged"],
|
||||||
|
)
|
||||||
|
|
||||||
go_library(
|
go_library(
|
||||||
name = "github.com/codegangsta/negroni",
|
name = "github.com/codegangsta/negroni",
|
||||||
srcs = [
|
srcs = [
|
||||||
@@ -1182,6 +1211,7 @@ go_library(
         "//vendor:github.com/coreos/pkg/capnslog",
         "//vendor:golang.org/x/crypto/bcrypt",
         "//vendor:golang.org/x/net/context",
+        "//vendor:google.golang.org/grpc/metadata",
     ],
 )
 
@@ -1247,8 +1277,10 @@ go_library(
         "//vendor:github.com/coreos/etcd/mvcc/mvccpb",
         "//vendor:github.com/coreos/etcd/pkg/tlsutil",
         "//vendor:github.com/ghodss/yaml",
+        "//vendor:github.com/grpc-ecosystem/go-grpc-prometheus",
         "//vendor:golang.org/x/net/context",
         "//vendor:google.golang.org/grpc",
+        "//vendor:google.golang.org/grpc/codes",
         "//vendor:google.golang.org/grpc/credentials",
         "//vendor:google.golang.org/grpc/grpclog",
         "//vendor:google.golang.org/grpc/metadata",
@@ -1355,7 +1387,6 @@ go_library(
         "//vendor:github.com/gogo/protobuf/proto",
         "//vendor:github.com/prometheus/client_golang/prometheus",
         "//vendor:golang.org/x/net/context",
-        "//vendor:google.golang.org/grpc/metadata",
     ],
 )
 
@@ -1458,6 +1489,7 @@ go_library(
         "//vendor:github.com/coreos/etcd/version",
         "//vendor:github.com/coreos/pkg/capnslog",
         "//vendor:github.com/gogo/protobuf/proto",
+        "//vendor:github.com/grpc-ecosystem/go-grpc-prometheus",
         "//vendor:github.com/prometheus/client_golang/prometheus",
         "//vendor:golang.org/x/net/context",
         "//vendor:google.golang.org/grpc",
@@ -1542,6 +1574,7 @@ go_library(
|
|||||||
"//vendor:github.com/coreos/etcd/version",
|
"//vendor:github.com/coreos/etcd/version",
|
||||||
"//vendor:github.com/coreos/go-semver/semver",
|
"//vendor:github.com/coreos/go-semver/semver",
|
||||||
"//vendor:github.com/coreos/pkg/capnslog",
|
"//vendor:github.com/coreos/pkg/capnslog",
|
||||||
|
"//vendor:golang.org/x/net/context",
|
||||||
],
|
],
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -1565,6 +1598,7 @@ go_library(
|
|||||||
srcs = [
|
srcs = [
|
||||||
"github.com/coreos/etcd/integration/bridge.go",
|
"github.com/coreos/etcd/integration/bridge.go",
|
||||||
"github.com/coreos/etcd/integration/cluster.go",
|
"github.com/coreos/etcd/integration/cluster.go",
|
||||||
|
"github.com/coreos/etcd/integration/cluster_direct.go",
|
||||||
"github.com/coreos/etcd/integration/doc.go",
|
"github.com/coreos/etcd/integration/doc.go",
|
||||||
],
|
],
|
||||||
tags = ["automanaged"],
|
tags = ["automanaged"],
|
||||||
@@ -1596,6 +1630,7 @@ go_library(
|
|||||||
deps = [
|
deps = [
|
||||||
"//vendor:github.com/coreos/etcd/lease/leasepb",
|
"//vendor:github.com/coreos/etcd/lease/leasepb",
|
||||||
"//vendor:github.com/coreos/etcd/mvcc/backend",
|
"//vendor:github.com/coreos/etcd/mvcc/backend",
|
||||||
|
"//vendor:github.com/coreos/etcd/pkg/monotime",
|
||||||
],
|
],
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -1609,6 +1644,8 @@ go_library(
|
|||||||
deps = [
|
deps = [
|
||||||
"//vendor:github.com/coreos/etcd/etcdserver/etcdserverpb",
|
"//vendor:github.com/coreos/etcd/etcdserver/etcdserverpb",
|
||||||
"//vendor:github.com/coreos/etcd/lease",
|
"//vendor:github.com/coreos/etcd/lease",
|
||||||
|
"//vendor:github.com/coreos/etcd/lease/leasepb",
|
||||||
|
"//vendor:github.com/coreos/etcd/pkg/httputil",
|
||||||
"//vendor:golang.org/x/net/context",
|
"//vendor:golang.org/x/net/context",
|
||||||
],
|
],
|
||||||
)
|
)
|
||||||
@@ -1617,7 +1654,10 @@ go_library(
|
|||||||
name = "github.com/coreos/etcd/lease/leasepb",
|
name = "github.com/coreos/etcd/lease/leasepb",
|
||||||
srcs = ["github.com/coreos/etcd/lease/leasepb/lease.pb.go"],
|
srcs = ["github.com/coreos/etcd/lease/leasepb/lease.pb.go"],
|
||||||
tags = ["automanaged"],
|
tags = ["automanaged"],
|
||||||
deps = ["//vendor:github.com/golang/protobuf/proto"],
|
deps = [
|
||||||
|
"//vendor:github.com/coreos/etcd/etcdserver/etcdserverpb",
|
||||||
|
"//vendor:github.com/golang/protobuf/proto",
|
||||||
|
],
|
||||||
)
|
)
|
||||||
|
|
||||||
go_library(
|
go_library(
|
||||||
@@ -1692,6 +1732,15 @@ go_library(
|
|||||||
tags = ["automanaged"],
|
tags = ["automanaged"],
|
||||||
)
|
)
|
||||||
|
|
||||||
|
go_library(
|
||||||
|
name = "github.com/coreos/etcd/pkg/cpuutil",
|
||||||
|
srcs = [
|
||||||
|
"github.com/coreos/etcd/pkg/cpuutil/doc.go",
|
||||||
|
"github.com/coreos/etcd/pkg/cpuutil/endian.go",
|
||||||
|
],
|
||||||
|
tags = ["automanaged"],
|
||||||
|
)
|
||||||
|
|
||||||
go_library(
|
go_library(
|
||||||
name = "github.com/coreos/etcd/pkg/crc",
|
name = "github.com/coreos/etcd/pkg/crc",
|
||||||
srcs = ["github.com/coreos/etcd/pkg/crc/crc.go"],
|
srcs = ["github.com/coreos/etcd/pkg/crc/crc.go"],
|
||||||
@@ -1746,16 +1795,29 @@ go_library(
|
|||||||
deps = ["//vendor:github.com/coreos/pkg/capnslog"],
|
deps = ["//vendor:github.com/coreos/pkg/capnslog"],
|
||||||
)
|
)
|
||||||
|
|
||||||
|
go_library(
|
||||||
|
name = "github.com/coreos/etcd/pkg/monotime",
|
||||||
|
srcs = [
|
||||||
|
"github.com/coreos/etcd/pkg/monotime/issue15006.s",
|
||||||
|
"github.com/coreos/etcd/pkg/monotime/monotime.go",
|
||||||
|
"github.com/coreos/etcd/pkg/monotime/nanotime.go",
|
||||||
|
],
|
||||||
|
tags = ["automanaged"],
|
||||||
|
)
|
||||||
|
|
||||||
go_library(
|
go_library(
|
||||||
name = "github.com/coreos/etcd/pkg/netutil",
|
name = "github.com/coreos/etcd/pkg/netutil",
|
||||||
srcs = [
|
srcs = [
|
||||||
"github.com/coreos/etcd/pkg/netutil/isolate_linux.go",
|
"github.com/coreos/etcd/pkg/netutil/isolate_linux.go",
|
||||||
"github.com/coreos/etcd/pkg/netutil/netutil.go",
|
"github.com/coreos/etcd/pkg/netutil/netutil.go",
|
||||||
|
"github.com/coreos/etcd/pkg/netutil/routes_linux.go",
|
||||||
],
|
],
|
||||||
tags = ["automanaged"],
|
tags = ["automanaged"],
|
||||||
deps = [
|
deps = [
|
||||||
|
"//vendor:github.com/coreos/etcd/pkg/cpuutil",
|
||||||
"//vendor:github.com/coreos/etcd/pkg/types",
|
"//vendor:github.com/coreos/etcd/pkg/types",
|
||||||
"//vendor:github.com/coreos/pkg/capnslog",
|
"//vendor:github.com/coreos/pkg/capnslog",
|
||||||
|
"//vendor:golang.org/x/net/context",
|
||||||
],
|
],
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -1852,6 +1914,53 @@ go_library(
     tags = ["automanaged"],
 )
 
+go_library(
+    name = "github.com/coreos/etcd/proxy/grpcproxy",
+    srcs = [
+        "github.com/coreos/etcd/proxy/grpcproxy/auth.go",
+        "github.com/coreos/etcd/proxy/grpcproxy/cluster.go",
+        "github.com/coreos/etcd/proxy/grpcproxy/doc.go",
+        "github.com/coreos/etcd/proxy/grpcproxy/kv.go",
+        "github.com/coreos/etcd/proxy/grpcproxy/kv_client_adapter.go",
+        "github.com/coreos/etcd/proxy/grpcproxy/lease.go",
+        "github.com/coreos/etcd/proxy/grpcproxy/maintenance.go",
+        "github.com/coreos/etcd/proxy/grpcproxy/metrics.go",
+        "github.com/coreos/etcd/proxy/grpcproxy/watch.go",
+        "github.com/coreos/etcd/proxy/grpcproxy/watch_broadcast.go",
+        "github.com/coreos/etcd/proxy/grpcproxy/watch_broadcasts.go",
+        "github.com/coreos/etcd/proxy/grpcproxy/watch_client_adapter.go",
+        "github.com/coreos/etcd/proxy/grpcproxy/watch_ranges.go",
+        "github.com/coreos/etcd/proxy/grpcproxy/watcher.go",
+    ],
+    tags = ["automanaged"],
+    deps = [
+        "//vendor:github.com/coreos/etcd/clientv3",
+        "//vendor:github.com/coreos/etcd/etcdserver/api/v3rpc",
+        "//vendor:github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes",
+        "//vendor:github.com/coreos/etcd/etcdserver/etcdserverpb",
+        "//vendor:github.com/coreos/etcd/mvcc",
+        "//vendor:github.com/coreos/etcd/mvcc/mvccpb",
+        "//vendor:github.com/coreos/etcd/proxy/grpcproxy/cache",
+        "//vendor:github.com/prometheus/client_golang/prometheus",
+        "//vendor:golang.org/x/net/context",
+        "//vendor:golang.org/x/time/rate",
+        "//vendor:google.golang.org/grpc",
+        "//vendor:google.golang.org/grpc/metadata",
+    ],
+)
+
+go_library(
+    name = "github.com/coreos/etcd/proxy/grpcproxy/cache",
+    srcs = ["github.com/coreos/etcd/proxy/grpcproxy/cache/store.go"],
+    tags = ["automanaged"],
+    deps = [
+        "//vendor:github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes",
+        "//vendor:github.com/coreos/etcd/etcdserver/etcdserverpb",
+        "//vendor:github.com/coreos/etcd/pkg/adt",
+        "//vendor:github.com/karlseguin/ccache",
+    ],
+)
+
 go_library(
     name = "github.com/coreos/etcd/raft",
     srcs = [
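The go_library rules above cover the gRPC proxy that is new in the etcd 3.1 line; it is built on the clientv3 API that this commit vendors. As a rough sketch of how a clientv3 client is constructed and queried (endpoint, timeout, and key are placeholders, not values from this commit):

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

func main() {
	// Dial a single-member etcd cluster; the endpoint is a placeholder.
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"http://127.0.0.1:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// Read one key with a short per-request timeout.
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	resp, err := cli.Get(ctx, "foo")
	cancel()
	if err != nil {
		log.Fatal(err)
	}
	for _, kv := range resp.Kvs {
		fmt.Printf("%s -> %s\n", kv.Key, kv.Value)
	}
}
```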
@@ -1863,6 +1972,7 @@ go_library(
         "github.com/coreos/etcd/raft/progress.go",
         "github.com/coreos/etcd/raft/raft.go",
         "github.com/coreos/etcd/raft/rawnode.go",
+        "github.com/coreos/etcd/raft/read_only.go",
         "github.com/coreos/etcd/raft/status.go",
         "github.com/coreos/etcd/raft/storage.go",
         "github.com/coreos/etcd/raft/util.go",
@@ -1979,10 +2089,7 @@ go_library(
     name = "github.com/coreos/etcd/version",
     srcs = ["github.com/coreos/etcd/version/version.go"],
     tags = ["automanaged"],
-    deps = [
-        "//vendor:github.com/coreos/etcd/pkg/fileutil",
-        "//vendor:github.com/coreos/etcd/pkg/types",
-    ],
+    deps = ["//vendor:github.com/coreos/go-semver/semver"],
 )
 
 go_library(
@@ -3284,6 +3391,8 @@ go_library(
|
|||||||
"github.com/gogo/protobuf/proto/clone.go",
|
"github.com/gogo/protobuf/proto/clone.go",
|
||||||
"github.com/gogo/protobuf/proto/decode.go",
|
"github.com/gogo/protobuf/proto/decode.go",
|
||||||
"github.com/gogo/protobuf/proto/decode_gogo.go",
|
"github.com/gogo/protobuf/proto/decode_gogo.go",
|
||||||
|
"github.com/gogo/protobuf/proto/duration.go",
|
||||||
|
"github.com/gogo/protobuf/proto/duration_gogo.go",
|
||||||
"github.com/gogo/protobuf/proto/encode.go",
|
"github.com/gogo/protobuf/proto/encode.go",
|
||||||
"github.com/gogo/protobuf/proto/encode_gogo.go",
|
"github.com/gogo/protobuf/proto/encode_gogo.go",
|
||||||
"github.com/gogo/protobuf/proto/equal.go",
|
"github.com/gogo/protobuf/proto/equal.go",
|
||||||
@@ -3300,6 +3409,8 @@ go_library(
|
|||||||
"github.com/gogo/protobuf/proto/text.go",
|
"github.com/gogo/protobuf/proto/text.go",
|
||||||
"github.com/gogo/protobuf/proto/text_gogo.go",
|
"github.com/gogo/protobuf/proto/text_gogo.go",
|
||||||
"github.com/gogo/protobuf/proto/text_parser.go",
|
"github.com/gogo/protobuf/proto/text_parser.go",
|
||||||
|
"github.com/gogo/protobuf/proto/timestamp.go",
|
||||||
|
"github.com/gogo/protobuf/proto/timestamp_gogo.go",
|
||||||
],
|
],
|
||||||
tags = ["automanaged"],
|
tags = ["automanaged"],
|
||||||
)
|
)
|
||||||
@@ -3307,8 +3418,9 @@ go_library(
|
|||||||
go_library(
|
go_library(
|
||||||
name = "github.com/gogo/protobuf/protoc-gen-gogo/descriptor",
|
name = "github.com/gogo/protobuf/protoc-gen-gogo/descriptor",
|
||||||
srcs = [
|
srcs = [
|
||||||
|
"github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go",
|
||||||
"github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go",
|
"github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go",
|
||||||
"github.com/gogo/protobuf/protoc-gen-gogo/descriptor/gostring.go",
|
"github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go",
|
||||||
"github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go",
|
"github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go",
|
||||||
],
|
],
|
||||||
tags = ["automanaged"],
|
tags = ["automanaged"],
|
||||||
@@ -4550,6 +4662,37 @@ go_library(
     deps = ["//vendor:github.com/gorilla/context"],
 )
 
+go_library(
+    name = "github.com/gorilla/websocket",
+    srcs = [
+        "github.com/gorilla/websocket/client.go",
+        "github.com/gorilla/websocket/conn.go",
+        "github.com/gorilla/websocket/doc.go",
+        "github.com/gorilla/websocket/json.go",
+        "github.com/gorilla/websocket/server.go",
+        "github.com/gorilla/websocket/util.go",
+    ],
+    tags = ["automanaged"],
+)
+
+go_library(
+    name = "github.com/grpc-ecosystem/go-grpc-prometheus",
+    srcs = [
+        "github.com/grpc-ecosystem/go-grpc-prometheus/client.go",
+        "github.com/grpc-ecosystem/go-grpc-prometheus/client_reporter.go",
+        "github.com/grpc-ecosystem/go-grpc-prometheus/server.go",
+        "github.com/grpc-ecosystem/go-grpc-prometheus/server_reporter.go",
+        "github.com/grpc-ecosystem/go-grpc-prometheus/util.go",
+    ],
+    tags = ["automanaged"],
+    deps = [
+        "//vendor:github.com/prometheus/client_golang/prometheus",
+        "//vendor:golang.org/x/net/context",
+        "//vendor:google.golang.org/grpc",
+        "//vendor:google.golang.org/grpc/codes",
+    ],
+)
+
 go_library(
     name = "github.com/grpc-ecosystem/grpc-gateway/runtime",
     srcs = [
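go-grpc-prometheus enters the vendor tree because the updated etcd packages (clientv3 and etcdserver above) now depend on it for RPC metrics. A small sketch of how its interceptors are typically attached to a gRPC server; the server setup here is illustrative, not code from this repository:

```go
package main

import (
	"net"

	"github.com/grpc-ecosystem/go-grpc-prometheus"
	"google.golang.org/grpc"
)

func main() {
	// Count and time every unary and streaming RPC handled by this server.
	srv := grpc.NewServer(
		grpc.UnaryInterceptor(grpc_prometheus.UnaryServerInterceptor),
		grpc.StreamInterceptor(grpc_prometheus.StreamServerInterceptor),
	)

	// ... register gRPC services on srv here ...

	// Initialize the per-method metrics for everything registered above.
	grpc_prometheus.Register(srv)

	lis, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	srv.Serve(lis)
}
```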
@@ -4902,6 +5045,20 @@ go_library(
     tags = ["automanaged"],
 )
 
+go_library(
+    name = "github.com/karlseguin/ccache",
+    srcs = [
+        "github.com/karlseguin/ccache/bucket.go",
+        "github.com/karlseguin/ccache/cache.go",
+        "github.com/karlseguin/ccache/configuration.go",
+        "github.com/karlseguin/ccache/item.go",
+        "github.com/karlseguin/ccache/layeredbucket.go",
+        "github.com/karlseguin/ccache/layeredcache.go",
+        "github.com/karlseguin/ccache/secondarycache.go",
+    ],
+    tags = ["automanaged"],
+)
+
 go_library(
     name = "github.com/kr/fs",
     srcs = [
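karlseguin/ccache is vendored as a dependency of the new grpcproxy cache package. Roughly how the library is used, as a sketch from its public API (the key, value, and TTL are arbitrary):

```go
package main

import (
	"fmt"
	"time"

	"github.com/karlseguin/ccache"
)

func main() {
	// An in-memory LRU cache capped at 1000 items.
	cache := ccache.New(ccache.Configure().MaxSize(1000))

	// Store a value with a one-minute TTL, then read it back.
	cache.Set("range:/registry/pods", "cached response", time.Minute)
	if item := cache.Get("range:/registry/pods"); item != nil && !item.Expired() {
		fmt.Println(item.Value())
	}
}
```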
@@ -5433,6 +5590,19 @@ go_library(
|
|||||||
],
|
],
|
||||||
)
|
)
|
||||||
|
|
||||||
|
go_library(
|
||||||
|
name = "github.com/onsi/ginkgo/internal/spec_iterator",
|
||||||
|
srcs = [
|
||||||
|
"github.com/onsi/ginkgo/internal/spec_iterator/index_computer.go",
|
||||||
|
"github.com/onsi/ginkgo/internal/spec_iterator/parallel_spec_iterator.go",
|
||||||
|
"github.com/onsi/ginkgo/internal/spec_iterator/serial_spec_iterator.go",
|
||||||
|
"github.com/onsi/ginkgo/internal/spec_iterator/sharded_parallel_spec_iterator.go",
|
||||||
|
"github.com/onsi/ginkgo/internal/spec_iterator/spec_iterator.go",
|
||||||
|
],
|
||||||
|
tags = ["automanaged"],
|
||||||
|
deps = ["//vendor:github.com/onsi/ginkgo/internal/spec"],
|
||||||
|
)
|
||||||
|
|
||||||
go_library(
|
go_library(
|
||||||
name = "github.com/onsi/ginkgo/internal/specrunner",
|
name = "github.com/onsi/ginkgo/internal/specrunner",
|
||||||
srcs = [
|
srcs = [
|
||||||
@@ -6443,6 +6613,164 @@ go_library(
|
|||||||
],
|
],
|
||||||
)
|
)
|
||||||
|
|
||||||
|
go_library(
|
||||||
|
name = "github.com/rancher/go-rancher/client",
|
||||||
|
srcs = [
|
||||||
|
"github.com/rancher/go-rancher/client/client.go",
|
||||||
|
"github.com/rancher/go-rancher/client/common.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_account.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_active_setting.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_add_outputs_input.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_add_remove_load_balancer_service_link_input.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_add_remove_service_link_input.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_agent.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_amazonec2config.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_api_key.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_audit_log.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_azure_config.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_azureadconfig.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_backup.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_backup_target.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_base_machine_config.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_blkio_device_option.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_certificate.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_change_secret_input.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_client.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_compose_config.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_compose_config_input.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_compose_project.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_compose_service.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_config_item.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_config_item_status.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_container.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_container_event.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_container_exec.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_container_logs.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_container_proxy.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_credential.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_databasechangelog.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_databasechangeloglock.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_digitalocean_config.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_dns_service.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_docker_build.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_dynamic_schema.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_environment.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_environment_upgrade.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_extension_implementation.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_extension_point.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_external_dns_event.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_external_event.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_external_handler.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_external_handler_external_handler_process_map.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_external_handler_process.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_external_handler_process_config.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_external_host_event.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_external_service.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_external_service_event.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_external_storage_pool_event.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_external_volume_event.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_field_documentation.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_githubconfig.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_ha_config.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_ha_config_input.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_haproxy_config.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_healthcheck_instance_host_map.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_host.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_host_access.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_host_api_proxy_token.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_identity.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_image.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_in_service_upgrade_strategy.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_instance.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_instance_console.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_instance_console_input.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_instance_health_check.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_instance_link.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_instance_stop.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_ip_address.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_ip_address_associate_input.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_kubernetes_service.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_kubernetes_stack.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_kubernetes_stack_upgrade.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_label.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_launch_config.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_ldapconfig.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_load_balancer_app_cookie_stickiness_policy.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_load_balancer_config.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_load_balancer_cookie_stickiness_policy.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_load_balancer_service.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_load_balancer_service_link.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_local_auth_config.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_log_config.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_machine.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_machine_driver.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_mount.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_network.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_nfs_config.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_openldapconfig.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_packet_config.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_password.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_physical_host.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_port.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_process_definition.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_process_execution.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_process_instance.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_project.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_project_member.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_public_endpoint.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_publish.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_pull_task.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_recreate_on_quorum_strategy_config.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_register.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_registration_token.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_registry.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_registry_credential.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_resource_definition.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_restart_policy.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_restore_from_backup_input.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_revert_to_snapshot_input.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_rolling_restart_strategy.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_scale_policy.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_secondary_launch_config.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_service.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_service_consume_map.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_service_event.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_service_expose_map.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_service_link.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_service_proxy.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_service_restart.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_service_upgrade.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_service_upgrade_strategy.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_services_port_range.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_set_labels_input.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_set_load_balancer_service_links_input.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_set_project_members_input.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_set_service_links_input.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_setting.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_snapshot.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_snapshot_backup_input.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_state_transition.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_stats_access.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_storage_pool.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_subscribe.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_task.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_task_instance.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_to_service_upgrade_strategy.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_type_documentation.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_virtual_machine.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_virtual_machine_disk.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_volume.go",
|
||||||
|
"github.com/rancher/go-rancher/client/generated_volume_snapshot_input.go",
|
||||||
|
"github.com/rancher/go-rancher/client/schemas.go",
|
||||||
|
"github.com/rancher/go-rancher/client/types.go",
|
||||||
|
],
|
||||||
|
tags = ["automanaged"],
|
||||||
|
deps = [
|
||||||
|
"//vendor:github.com/gorilla/websocket",
|
||||||
|
"//vendor:github.com/pkg/errors",
|
||||||
|
],
|
||||||
|
)
|
||||||
|
|
||||||
go_library(
|
go_library(
|
||||||
name = "github.com/renstrom/dedent",
|
name = "github.com/renstrom/dedent",
|
||||||
srcs = ["github.com/renstrom/dedent/dedent.go"],
|
srcs = ["github.com/renstrom/dedent/dedent.go"],
|
||||||
@@ -7482,18 +7810,23 @@ go_library(
         "golang.org/x/net/http2/frame.go",
         "golang.org/x/net/http2/go16.go",
         "golang.org/x/net/http2/go17.go",
+        "golang.org/x/net/http2/go17_not18.go",
         "golang.org/x/net/http2/gotrack.go",
         "golang.org/x/net/http2/headermap.go",
         "golang.org/x/net/http2/http2.go",
+        "golang.org/x/net/http2/not_go18.go",
         "golang.org/x/net/http2/pipe.go",
         "golang.org/x/net/http2/server.go",
         "golang.org/x/net/http2/transport.go",
         "golang.org/x/net/http2/write.go",
         "golang.org/x/net/http2/writesched.go",
+        "golang.org/x/net/http2/writesched_priority.go",
+        "golang.org/x/net/http2/writesched_random.go",
     ],
     tags = ["automanaged"],
     deps = [
         "//vendor:golang.org/x/net/http2/hpack",
+        "//vendor:golang.org/x/net/idna",
         "//vendor:golang.org/x/net/lex/httplex",
     ],
 )
@@ -7528,6 +7861,7 @@ go_library(
     name = "golang.org/x/net/lex/httplex",
     srcs = ["golang.org/x/net/lex/httplex/httplex.go"],
     tags = ["automanaged"],
+    deps = ["//vendor:golang.org/x/net/idna"],
 )
 
 go_library(
@@ -7559,6 +7893,7 @@ go_library(
     name = "golang.org/x/net/websocket",
     srcs = [
         "golang.org/x/net/websocket/client.go",
+        "golang.org/x/net/websocket/dial.go",
         "golang.org/x/net/websocket/hybi.go",
         "golang.org/x/net/websocket/server.go",
         "golang.org/x/net/websocket/websocket.go",
@@ -7845,6 +8180,13 @@ go_library(
     deps = ["//vendor:golang.org/x/text/transform"],
 )
 
+go_library(
+    name = "golang.org/x/time/rate",
+    srcs = ["golang.org/x/time/rate/rate.go"],
+    tags = ["automanaged"],
+    deps = ["//vendor:golang.org/x/net/context"],
+)
+
 go_library(
     name = "google.golang.org/api/cloudmonitoring/v2beta2",
     srcs = ["google.golang.org/api/cloudmonitoring/v2beta2/cloudmonitoring-gen.go"],
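golang.org/x/time/rate is newly vendored as a dependency of the gRPC proxy packages above. A minimal sketch of the limiter API it provides (the chosen rate and burst are arbitrary):

```go
package main

import (
	"fmt"
	"time"

	"golang.org/x/net/context"
	"golang.org/x/time/rate"
)

func main() {
	// Allow 10 events per second with a burst of 1.
	limiter := rate.NewLimiter(rate.Limit(10), 1)

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	for i := 0; i < 5; i++ {
		// Wait blocks until the next event is permitted or the context ends.
		if err := limiter.Wait(ctx); err != nil {
			fmt.Println("rate limiter:", err)
			return
		}
		fmt.Println("event", i, "at", time.Now().Format("15:04:05.000"))
	}
}
```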
@@ -9534,6 +9876,35 @@ go_library(
|
|||||||
],
|
],
|
||||||
)
|
)
|
||||||
|
|
||||||
|
go_library(
|
||||||
|
name = "k8s.io/apiserver/pkg/admission/initializer",
|
||||||
|
srcs = [
|
||||||
|
"k8s.io/apiserver/pkg/admission/initializer/initializer.go",
|
||||||
|
"k8s.io/apiserver/pkg/admission/initializer/interfaces.go",
|
||||||
|
],
|
||||||
|
tags = ["automanaged"],
|
||||||
|
deps = [
|
||||||
|
"//vendor:k8s.io/apiserver/pkg/admission",
|
||||||
|
"//vendor:k8s.io/apiserver/pkg/authorization/authorizer",
|
||||||
|
"//vendor:k8s.io/client-go/informers",
|
||||||
|
"//vendor:k8s.io/client-go/kubernetes",
|
||||||
|
],
|
||||||
|
)
|
||||||
|
|
||||||
|
go_test(
|
||||||
|
name = "k8s.io/apiserver/pkg/admission/initializer_xtest",
|
||||||
|
srcs = ["k8s.io/apiserver/pkg/admission/initializer/initializer_test.go"],
|
||||||
|
tags = ["automanaged"],
|
||||||
|
deps = [
|
||||||
|
"//vendor:k8s.io/apiserver/pkg/admission",
|
||||||
|
"//vendor:k8s.io/apiserver/pkg/admission/initializer",
|
||||||
|
"//vendor:k8s.io/apiserver/pkg/authorization/authorizer",
|
||||||
|
"//vendor:k8s.io/client-go/informers",
|
||||||
|
"//vendor:k8s.io/client-go/kubernetes",
|
||||||
|
"//vendor:k8s.io/client-go/kubernetes/fake",
|
||||||
|
],
|
||||||
|
)
|
||||||
|
|
||||||
go_library(
|
go_library(
|
||||||
name = "k8s.io/apiserver/pkg/apis/apiserver",
|
name = "k8s.io/apiserver/pkg/apis/apiserver",
|
||||||
srcs = [
|
srcs = [
|
||||||
@@ -13514,7 +13885,6 @@ go_library(
|
|||||||
go_library(
|
go_library(
|
||||||
name = "k8s.io/client-go/pkg/api",
|
name = "k8s.io/client-go/pkg/api",
|
||||||
srcs = [
|
srcs = [
|
||||||
"k8s.io/client-go/pkg/api/annotation_key_constants.go",
|
|
||||||
"k8s.io/client-go/pkg/api/defaults.go",
|
"k8s.io/client-go/pkg/api/defaults.go",
|
||||||
"k8s.io/client-go/pkg/api/doc.go",
|
"k8s.io/client-go/pkg/api/doc.go",
|
||||||
"k8s.io/client-go/pkg/api/field_constants.go",
|
"k8s.io/client-go/pkg/api/field_constants.go",
|
||||||
@@ -15901,6 +16271,44 @@ go_library(
|
|||||||
],
|
],
|
||||||
)
|
)
|
||||||
|
|
||||||
|
go_test(
|
||||||
|
name = "k8s.io/kube-aggregator/pkg/controllers/autoregister_test",
|
||||||
|
srcs = ["k8s.io/kube-aggregator/pkg/controllers/autoregister/autoregister_controller_test.go"],
|
||||||
|
library = ":k8s.io/kube-aggregator/pkg/controllers/autoregister",
|
||||||
|
tags = ["automanaged"],
|
||||||
|
deps = [
|
||||||
|
"//vendor:k8s.io/apimachinery/pkg/api/errors",
|
||||||
|
"//vendor:k8s.io/apimachinery/pkg/api/meta",
|
||||||
|
"//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
|
||||||
|
"//vendor:k8s.io/apimachinery/pkg/util/wait",
|
||||||
|
"//vendor:k8s.io/apimachinery/pkg/watch",
|
||||||
|
"//vendor:k8s.io/client-go/testing",
|
||||||
|
"//vendor:k8s.io/kube-aggregator/pkg/apis/apiregistration",
|
||||||
|
"//vendor:k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset",
|
||||||
|
"//vendor:k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/fake",
|
||||||
|
"//vendor:k8s.io/kube-aggregator/pkg/client/informers/internalversion",
|
||||||
|
],
|
||||||
|
)
|
||||||
|
|
||||||
|
go_library(
|
||||||
|
name = "k8s.io/kube-aggregator/pkg/controllers/autoregister",
|
||||||
|
srcs = ["k8s.io/kube-aggregator/pkg/controllers/autoregister/autoregister_controller.go"],
|
||||||
|
tags = ["automanaged"],
|
||||||
|
deps = [
|
||||||
|
"//vendor:github.com/golang/glog",
|
||||||
|
"//vendor:k8s.io/apimachinery/pkg/api/errors",
|
||||||
|
"//vendor:k8s.io/apimachinery/pkg/conversion",
|
||||||
|
"//vendor:k8s.io/apimachinery/pkg/util/runtime",
|
||||||
|
"//vendor:k8s.io/apimachinery/pkg/util/wait",
|
||||||
|
"//vendor:k8s.io/client-go/tools/cache",
|
||||||
|
"//vendor:k8s.io/client-go/util/workqueue",
|
||||||
|
"//vendor:k8s.io/kube-aggregator/pkg/apis/apiregistration",
|
||||||
|
"//vendor:k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/typed/apiregistration/internalversion",
|
||||||
|
"//vendor:k8s.io/kube-aggregator/pkg/client/informers/internalversion/apiregistration/internalversion",
|
||||||
|
"//vendor:k8s.io/kube-aggregator/pkg/client/listers/apiregistration/internalversion",
|
||||||
|
],
|
||||||
|
)
|
||||||
|
|
||||||
go_library(
|
go_library(
|
||||||
name = "k8s.io/kube-aggregator/pkg/registry/apiservice",
|
name = "k8s.io/kube-aggregator/pkg/registry/apiservice",
|
||||||
srcs = ["k8s.io/kube-aggregator/pkg/registry/apiservice/strategy.go"],
|
srcs = ["k8s.io/kube-aggregator/pkg/registry/apiservice/strategy.go"],
|
||||||
@@ -16338,283 +16746,3 @@ filegroup(
|
|||||||
srcs = [":package-srcs"],
|
srcs = [":package-srcs"],
|
||||||
tags = ["automanaged"],
|
tags = ["automanaged"],
|
||||||
)
|
)
|
||||||
|
|
||||||
go_library(
|
|
||||||
name = "github.com/gorilla/websocket",
|
|
||||||
srcs = [
|
|
||||||
"github.com/gorilla/websocket/client.go",
|
|
||||||
"github.com/gorilla/websocket/conn.go",
|
|
||||||
"github.com/gorilla/websocket/doc.go",
|
|
||||||
"github.com/gorilla/websocket/json.go",
|
|
||||||
"github.com/gorilla/websocket/server.go",
|
|
||||||
"github.com/gorilla/websocket/util.go",
|
|
||||||
],
|
|
||||||
tags = ["automanaged"],
|
|
||||||
)
|
|
||||||
|
|
||||||
go_library(
|
|
||||||
name = "github.com/rancher/go-rancher/client",
|
|
||||||
srcs = [
|
|
||||||
"github.com/rancher/go-rancher/client/client.go",
|
|
||||||
"github.com/rancher/go-rancher/client/common.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_account.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_active_setting.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_add_outputs_input.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_add_remove_load_balancer_service_link_input.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_add_remove_service_link_input.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_agent.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_amazonec2config.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_api_key.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_audit_log.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_azure_config.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_azureadconfig.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_backup.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_backup_target.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_base_machine_config.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_blkio_device_option.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_certificate.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_change_secret_input.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_client.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_compose_config.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_compose_config_input.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_compose_project.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_compose_service.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_config_item.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_config_item_status.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_container.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_container_event.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_container_exec.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_container_logs.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_container_proxy.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_credential.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_databasechangelog.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_databasechangeloglock.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_digitalocean_config.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_dns_service.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_docker_build.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_dynamic_schema.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_environment.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_environment_upgrade.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_extension_implementation.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_extension_point.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_external_dns_event.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_external_event.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_external_handler.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_external_handler_external_handler_process_map.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_external_handler_process.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_external_handler_process_config.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_external_host_event.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_external_service.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_external_service_event.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_external_storage_pool_event.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_external_volume_event.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_field_documentation.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_githubconfig.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_ha_config.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_ha_config_input.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_haproxy_config.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_healthcheck_instance_host_map.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_host.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_host_access.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_host_api_proxy_token.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_identity.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_image.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_in_service_upgrade_strategy.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_instance.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_instance_console.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_instance_console_input.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_instance_health_check.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_instance_link.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_instance_stop.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_ip_address.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_ip_address_associate_input.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_kubernetes_service.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_kubernetes_stack.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_kubernetes_stack_upgrade.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_label.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_launch_config.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_ldapconfig.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_load_balancer_app_cookie_stickiness_policy.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_load_balancer_config.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_load_balancer_cookie_stickiness_policy.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_load_balancer_service.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_load_balancer_service_link.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_local_auth_config.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_log_config.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_machine.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_machine_driver.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_mount.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_network.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_nfs_config.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_openldapconfig.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_packet_config.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_password.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_physical_host.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_port.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_process_definition.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_process_execution.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_process_instance.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_project.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_project_member.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_public_endpoint.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_publish.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_pull_task.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_recreate_on_quorum_strategy_config.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_register.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_registration_token.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_registry.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_registry_credential.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_resource_definition.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_restart_policy.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_restore_from_backup_input.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_revert_to_snapshot_input.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_rolling_restart_strategy.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_scale_policy.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_secondary_launch_config.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_service.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_service_consume_map.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_service_event.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_service_expose_map.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_service_link.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_service_proxy.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_service_restart.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_service_upgrade.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_service_upgrade_strategy.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_services_port_range.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_set_labels_input.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_set_load_balancer_service_links_input.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_set_project_members_input.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_set_service_links_input.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_setting.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_snapshot.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_snapshot_backup_input.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_state_transition.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_stats_access.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_storage_pool.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_subscribe.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_task.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_task_instance.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_to_service_upgrade_strategy.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_type_documentation.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_virtual_machine.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_virtual_machine_disk.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_volume.go",
|
|
||||||
"github.com/rancher/go-rancher/client/generated_volume_snapshot_input.go",
|
|
||||||
"github.com/rancher/go-rancher/client/schemas.go",
|
|
||||||
"github.com/rancher/go-rancher/client/types.go",
|
|
||||||
],
|
|
||||||
tags = ["automanaged"],
|
|
||||||
deps = [
|
|
||||||
"//vendor:github.com/gorilla/websocket",
|
|
||||||
"//vendor:github.com/pkg/errors",
|
|
||||||
],
|
|
||||||
)
|
|
||||||
|
|
||||||
go_library(
|
|
||||||
name = "github.com/codedellemc/goscaleio",
|
|
||||||
srcs = [
|
|
||||||
"github.com/codedellemc/goscaleio/api.go",
|
|
||||||
"github.com/codedellemc/goscaleio/certs.go",
|
|
||||||
"github.com/codedellemc/goscaleio/device.go",
|
|
||||||
"github.com/codedellemc/goscaleio/instance.go",
|
|
||||||
"github.com/codedellemc/goscaleio/protectiondomain.go",
|
|
||||||
"github.com/codedellemc/goscaleio/scsiinitiator.go",
|
|
||||||
"github.com/codedellemc/goscaleio/sdc.go",
|
|
||||||
"github.com/codedellemc/goscaleio/sds.go",
|
|
||||||
"github.com/codedellemc/goscaleio/storagepool.go",
|
|
||||||
"github.com/codedellemc/goscaleio/system.go",
|
|
||||||
"github.com/codedellemc/goscaleio/user.go",
|
|
||||||
"github.com/codedellemc/goscaleio/volume.go",
|
|
||||||
],
|
|
||||||
tags = ["automanaged"],
|
|
||||||
deps = [
|
|
||||||
"//vendor:github.com/Sirupsen/logrus",
|
|
||||||
"//vendor:github.com/codedellemc/goscaleio/types/v1",
|
|
||||||
],
|
|
||||||
)
|
|
||||||
|
|
||||||
go_library(
|
|
||||||
name = "github.com/codedellemc/goscaleio/types/v1",
|
|
||||||
srcs = ["github.com/codedellemc/goscaleio/types/v1/types.go"],
|
|
||||||
tags = ["automanaged"],
|
|
||||||
)
|
|
||||||
|
|
||||||
go_test(
|
|
||||||
name = "k8s.io/kube-aggregator/pkg/controllers/autoregister_test",
|
|
||||||
srcs = ["k8s.io/kube-aggregator/pkg/controllers/autoregister/autoregister_controller_test.go"],
|
|
||||||
library = ":k8s.io/kube-aggregator/pkg/controllers/autoregister",
|
|
||||||
tags = ["automanaged"],
|
|
||||||
deps = [
|
|
||||||
"//vendor:k8s.io/apimachinery/pkg/api/errors",
|
|
||||||
"//vendor:k8s.io/apimachinery/pkg/api/meta",
|
|
||||||
"//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
|
|
||||||
"//vendor:k8s.io/apimachinery/pkg/util/wait",
|
|
||||||
"//vendor:k8s.io/apimachinery/pkg/watch",
|
|
||||||
"//vendor:k8s.io/client-go/testing",
|
|
||||||
"//vendor:k8s.io/kube-aggregator/pkg/apis/apiregistration",
|
|
||||||
"//vendor:k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset",
|
|
||||||
"//vendor:k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/fake",
|
|
||||||
"//vendor:k8s.io/kube-aggregator/pkg/client/informers/internalversion",
|
|
||||||
],
|
|
||||||
)
|
|
||||||
|
|
||||||
go_library(
|
|
||||||
name = "k8s.io/kube-aggregator/pkg/controllers/autoregister",
|
|
||||||
srcs = ["k8s.io/kube-aggregator/pkg/controllers/autoregister/autoregister_controller.go"],
|
|
||||||
tags = ["automanaged"],
|
|
||||||
deps = [
|
|
||||||
"//vendor:github.com/golang/glog",
|
|
||||||
"//vendor:k8s.io/apimachinery/pkg/api/errors",
|
|
||||||
"//vendor:k8s.io/apimachinery/pkg/conversion",
|
|
||||||
"//vendor:k8s.io/apimachinery/pkg/util/runtime",
|
|
||||||
"//vendor:k8s.io/apimachinery/pkg/util/wait",
|
|
||||||
"//vendor:k8s.io/client-go/tools/cache",
|
|
||||||
"//vendor:k8s.io/client-go/util/workqueue",
|
|
||||||
"//vendor:k8s.io/kube-aggregator/pkg/apis/apiregistration",
|
|
||||||
"//vendor:k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/typed/apiregistration/internalversion",
|
|
||||||
"//vendor:k8s.io/kube-aggregator/pkg/client/informers/internalversion/apiregistration/internalversion",
|
|
||||||
"//vendor:k8s.io/kube-aggregator/pkg/client/listers/apiregistration/internalversion",
|
|
||||||
],
|
|
||||||
)
|
|
||||||
|
|
||||||
go_library(
|
|
||||||
name = "k8s.io/apiserver/pkg/admission/initializer",
|
|
||||||
srcs = [
|
|
||||||
"k8s.io/apiserver/pkg/admission/initializer/initializer.go",
|
|
||||||
"k8s.io/apiserver/pkg/admission/initializer/interfaces.go",
|
|
||||||
],
|
|
||||||
tags = ["automanaged"],
|
|
||||||
deps = [
|
|
||||||
"//vendor:k8s.io/apiserver/pkg/admission",
|
|
||||||
"//vendor:k8s.io/apiserver/pkg/authorization/authorizer",
|
|
||||||
"//vendor:k8s.io/client-go/informers",
|
|
||||||
"//vendor:k8s.io/client-go/kubernetes",
|
|
||||||
],
|
|
||||||
)
|
|
||||||
|
|
||||||
go_test(
|
|
||||||
name = "k8s.io/apiserver/pkg/admission/initializer_xtest",
|
|
||||||
srcs = ["k8s.io/apiserver/pkg/admission/initializer/initializer_test.go"],
|
|
||||||
tags = ["automanaged"],
|
|
||||||
deps = [
|
|
||||||
"//vendor:k8s.io/apiserver/pkg/admission",
|
|
||||||
"//vendor:k8s.io/apiserver/pkg/admission/initializer",
|
|
||||||
"//vendor:k8s.io/apiserver/pkg/authorization/authorizer",
|
|
||||||
"//vendor:k8s.io/client-go/informers",
|
|
||||||
"//vendor:k8s.io/client-go/kubernetes",
|
|
||||||
"//vendor:k8s.io/client-go/kubernetes/fake",
|
|
||||||
],
|
|
||||||
)
|
|
||||||
|
|
||||||
go_library(
|
|
||||||
name = "github.com/onsi/ginkgo/internal/spec_iterator",
|
|
||||||
srcs = [
|
|
||||||
"github.com/onsi/ginkgo/internal/spec_iterator/index_computer.go",
|
|
||||||
"github.com/onsi/ginkgo/internal/spec_iterator/parallel_spec_iterator.go",
|
|
||||||
"github.com/onsi/ginkgo/internal/spec_iterator/serial_spec_iterator.go",
|
|
||||||
"github.com/onsi/ginkgo/internal/spec_iterator/sharded_parallel_spec_iterator.go",
|
|
||||||
"github.com/onsi/ginkgo/internal/spec_iterator/spec_iterator.go",
|
|
||||||
],
|
|
||||||
tags = ["automanaged"],
|
|
||||||
deps = ["//vendor:github.com/onsi/ginkgo/internal/spec"],
|
|
||||||
)
|
|
||||||
|
|||||||
15
vendor/cloud.google.com/go/AUTHORS
generated
vendored
Normal file
15
vendor/cloud.google.com/go/AUTHORS
generated
vendored
Normal file
@@ -0,0 +1,15 @@
+# This is the official list of cloud authors for copyright purposes.
+# This file is distinct from the CONTRIBUTORS files.
+# See the latter for an explanation.
+
+# Names should be added to this file as:
+# Name or Organization <email address>
+# The email address is not required for organizations.
+
+Filippo Valsorda <hi@filippo.io>
+Google Inc.
+Ingo Oeser <nightlyone@googlemail.com>
+Palm Stone Games, Inc.
+Paweł Knap <pawelknap88@gmail.com>
+Péter Szilágyi <peterke@gmail.com>
+Tyler Treat <ttreat31@gmail.com>
34
vendor/cloud.google.com/go/CONTRIBUTORS
generated
vendored
Normal file
34
vendor/cloud.google.com/go/CONTRIBUTORS
generated
vendored
Normal file
@@ -0,0 +1,34 @@
+# People who have agreed to one of the CLAs and can contribute patches.
+# The AUTHORS file lists the copyright holders; this file
+# lists people. For example, Google employees are listed here
+# but not in AUTHORS, because Google holds the copyright.
+#
+# https://developers.google.com/open-source/cla/individual
+# https://developers.google.com/open-source/cla/corporate
+#
+# Names should be added to this file as:
+# Name <email address>
+
+# Keep the list alphabetically sorted.
+
+Andreas Litt <andreas.litt@gmail.com>
+Andrew Gerrand <adg@golang.org>
+Brad Fitzpatrick <bradfitz@golang.org>
+Burcu Dogan <jbd@google.com>
+Dave Day <djd@golang.org>
+David Sansome <me@davidsansome.com>
+David Symonds <dsymonds@golang.org>
+Filippo Valsorda <hi@filippo.io>
+Glenn Lewis <gmlewis@google.com>
+Ingo Oeser <nightlyone@googlemail.com>
+Johan Euphrosine <proppy@google.com>
+Jonathan Amsterdam <jba@google.com>
+Luna Duclos <luna.duclos@palmstonegames.com>
+Michael McGreevy <mcgreevy@golang.org>
+Omar Jarjur <ojarjur@google.com>
+Paweł Knap <pawelknap88@gmail.com>
+Péter Szilágyi <peterke@gmail.com>
+Sarah Adams <shadams@google.com>
+Toby Burress <kurin@google.com>
+Tuo Shan <shantuo@google.com>
+Tyler Treat <ttreat31@gmail.com>
186  vendor/github.com/coreos/etcd/auth/authpb/auth.pb.go  generated vendored
@@ -32,7 +32,9 @@ var _ = math.Inf
 
 // This is a compile-time assertion to ensure that this generated file
 // is compatible with the proto package it is being compiled against.
-const _ = proto.ProtoPackageIsVersion1
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
 
 type Permission_Type int32
 
@@ -99,113 +101,113 @@ func init() {
-func (m *User) Marshal() (data []byte, err error) {
+func (m *User) Marshal() (dAtA []byte, err error) {
-	data = make([]byte, size)
+	dAtA = make([]byte, size)
-	n, err := m.MarshalTo(data)
+	n, err := m.MarshalTo(dAtA)
-	return data[:n], nil
+	return dAtA[:n], nil
-func (m *User) MarshalTo(data []byte) (int, error) {
+func (m *User) MarshalTo(dAtA []byte) (int, error) {
-		data[i] = 0xa
+		dAtA[i] = 0xa
-		i = encodeVarintAuth(data, i, uint64(len(m.Name)))
+		i = encodeVarintAuth(dAtA, i, uint64(len(m.Name)))
-		i += copy(data[i:], m.Name)
+		i += copy(dAtA[i:], m.Name)
-		data[i] = 0x12
+		dAtA[i] = 0x12
-		i = encodeVarintAuth(data, i, uint64(len(m.Password)))
+		i = encodeVarintAuth(dAtA, i, uint64(len(m.Password)))
-		i += copy(data[i:], m.Password)
+		i += copy(dAtA[i:], m.Password)
-			data[i] = 0x1a
+			dAtA[i] = 0x1a
-				data[i] = uint8(uint64(l)&0x7f | 0x80)
+				dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
-			data[i] = uint8(l)
+			dAtA[i] = uint8(l)
-			i += copy(data[i:], s)
+			i += copy(dAtA[i:], s)
-func (m *Permission) Marshal() (data []byte, err error) {
+func (m *Permission) Marshal() (dAtA []byte, err error) {
-	data = make([]byte, size)
+	dAtA = make([]byte, size)
-	n, err := m.MarshalTo(data)
+	n, err := m.MarshalTo(dAtA)
-	return data[:n], nil
+	return dAtA[:n], nil
-func (m *Permission) MarshalTo(data []byte) (int, error) {
+func (m *Permission) MarshalTo(dAtA []byte) (int, error) {
-		data[i] = 0x8
+		dAtA[i] = 0x8
-		i = encodeVarintAuth(data, i, uint64(m.PermType))
+		i = encodeVarintAuth(dAtA, i, uint64(m.PermType))
-		data[i] = 0x12
+		dAtA[i] = 0x12
-		i = encodeVarintAuth(data, i, uint64(len(m.Key)))
+		i = encodeVarintAuth(dAtA, i, uint64(len(m.Key)))
-		i += copy(data[i:], m.Key)
+		i += copy(dAtA[i:], m.Key)
-		data[i] = 0x1a
+		dAtA[i] = 0x1a
-		i = encodeVarintAuth(data, i, uint64(len(m.RangeEnd)))
+		i = encodeVarintAuth(dAtA, i, uint64(len(m.RangeEnd)))
-		i += copy(data[i:], m.RangeEnd)
+		i += copy(dAtA[i:], m.RangeEnd)
-func (m *Role) Marshal() (data []byte, err error) {
+func (m *Role) Marshal() (dAtA []byte, err error) {
-	data = make([]byte, size)
+	dAtA = make([]byte, size)
-	n, err := m.MarshalTo(data)
+	n, err := m.MarshalTo(dAtA)
-	return data[:n], nil
+	return dAtA[:n], nil
-func (m *Role) MarshalTo(data []byte) (int, error) {
+func (m *Role) MarshalTo(dAtA []byte) (int, error) {
-		data[i] = 0xa
+		dAtA[i] = 0xa
-		i = encodeVarintAuth(data, i, uint64(len(m.Name)))
+		i = encodeVarintAuth(dAtA, i, uint64(len(m.Name)))
-		i += copy(data[i:], m.Name)
+		i += copy(dAtA[i:], m.Name)
-			data[i] = 0x12
+			dAtA[i] = 0x12
-			i = encodeVarintAuth(data, i, uint64(msg.Size()))
+			i = encodeVarintAuth(dAtA, i, uint64(msg.Size()))
-			n, err := msg.MarshalTo(data[i:])
+			n, err := msg.MarshalTo(dAtA[i:])
@@ -215,31 +217,31 @@ func (m *Role) MarshalTo(data []byte) (int, error) {
-func encodeFixed64Auth(data []byte, offset int, v uint64) int {
-	data[offset] = uint8(v)
-	data[offset+1] = uint8(v >> 8)
-	data[offset+2] = uint8(v >> 16)
-	data[offset+3] = uint8(v >> 24)
-	data[offset+4] = uint8(v >> 32)
-	data[offset+5] = uint8(v >> 40)
-	data[offset+6] = uint8(v >> 48)
-	data[offset+7] = uint8(v >> 56)
+func encodeFixed64Auth(dAtA []byte, offset int, v uint64) int {
+	dAtA[offset] = uint8(v)
+	dAtA[offset+1] = uint8(v >> 8)
+	dAtA[offset+2] = uint8(v >> 16)
+	dAtA[offset+3] = uint8(v >> 24)
+	dAtA[offset+4] = uint8(v >> 32)
+	dAtA[offset+5] = uint8(v >> 40)
+	dAtA[offset+6] = uint8(v >> 48)
+	dAtA[offset+7] = uint8(v >> 56)
-func encodeFixed32Auth(data []byte, offset int, v uint32) int {
-	data[offset] = uint8(v)
-	data[offset+1] = uint8(v >> 8)
-	data[offset+2] = uint8(v >> 16)
-	data[offset+3] = uint8(v >> 24)
+func encodeFixed32Auth(dAtA []byte, offset int, v uint32) int {
+	dAtA[offset] = uint8(v)
+	dAtA[offset+1] = uint8(v >> 8)
+	dAtA[offset+2] = uint8(v >> 16)
+	dAtA[offset+3] = uint8(v >> 24)
-func encodeVarintAuth(data []byte, offset int, v uint64) int {
+func encodeVarintAuth(dAtA []byte, offset int, v uint64) int {
-		data[offset] = uint8(v&0x7f | 0x80)
+		dAtA[offset] = uint8(v&0x7f | 0x80)
-	data[offset] = uint8(v)
+	dAtA[offset] = uint8(v)
@@ -308,8 +310,8 @@ func sovAuth(x uint64) (n int) {
-func (m *User) Unmarshal(data []byte) error {
-	l := len(data)
+func (m *User) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
@@ -321,7 +323,7 @@ func (m *User) Unmarshal(data []byte) error {
-			b := data[iNdEx]
+			b := dAtA[iNdEx]
@@ -349,7 +351,7 @@ func (m *User) Unmarshal(data []byte) error {
-				b := data[iNdEx]
+				b := dAtA[iNdEx]
@@ -363,7 +365,7 @@ func (m *User) Unmarshal(data []byte) error {
-			m.Name = append(m.Name[:0], data[iNdEx:postIndex]...)
+			m.Name = append(m.Name[:0], dAtA[iNdEx:postIndex]...)
@@ -380,7 +382,7 @@ func (m *User) Unmarshal(data []byte) error {
-				b := data[iNdEx]
+				b := dAtA[iNdEx]
@@ -394,7 +396,7 @@ func (m *User) Unmarshal(data []byte) error {
-			m.Password = append(m.Password[:0], data[iNdEx:postIndex]...)
+			m.Password = append(m.Password[:0], dAtA[iNdEx:postIndex]...)
@@ -411,7 +413,7 @@ func (m *User) Unmarshal(data []byte) error {
-				b := data[iNdEx]
+				b := dAtA[iNdEx]
@@ -426,11 +428,11 @@ func (m *User) Unmarshal(data []byte) error {
-			m.Roles = append(m.Roles, string(data[iNdEx:postIndex]))
+			m.Roles = append(m.Roles, string(dAtA[iNdEx:postIndex]))
-			skippy, err := skipAuth(data[iNdEx:])
+			skippy, err := skipAuth(dAtA[iNdEx:])
@@ -449,8 +451,8 @@ func (m *User) Unmarshal(data []byte) error {
-func (m *Permission) Unmarshal(data []byte) error {
-	l := len(data)
+func (m *Permission) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
@@ -462,7 +464,7 @@ func (m *Permission) Unmarshal(data []byte) error {
-			b := data[iNdEx]
+			b := dAtA[iNdEx]
@@ -490,7 +492,7 @@ func (m *Permission) Unmarshal(data []byte) error {
-				b := data[iNdEx]
+				b := dAtA[iNdEx]
@@ -509,7 +511,7 @@ func (m *Permission) Unmarshal(data []byte) error {
-				b := data[iNdEx]
+				b := dAtA[iNdEx]
@@ -523,7 +525,7 @@ func (m *Permission) Unmarshal(data []byte) error {
-			m.Key = append(m.Key[:0], data[iNdEx:postIndex]...)
+			m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
@@ -540,7 +542,7 @@ func (m *Permission) Unmarshal(data []byte) error {
-				b := data[iNdEx]
+				b := dAtA[iNdEx]
@@ -554,14 +556,14 @@ func (m *Permission) Unmarshal(data []byte) error {
-			m.RangeEnd = append(m.RangeEnd[:0], data[iNdEx:postIndex]...)
+			m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...)
-			skippy, err := skipAuth(data[iNdEx:])
+			skippy, err := skipAuth(dAtA[iNdEx:])
@@ -580,8 +582,8 @@ func (m *Permission) Unmarshal(data []byte) error {
-func (m *Role) Unmarshal(data []byte) error {
-	l := len(data)
+func (m *Role) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
@@ -593,7 +595,7 @@ func (m *Role) Unmarshal(data []byte) error {
-			b := data[iNdEx]
+			b := dAtA[iNdEx]
@@ -621,7 +623,7 @@ func (m *Role) Unmarshal(data []byte) error {
-				b := data[iNdEx]
+				b := dAtA[iNdEx]
@@ -635,7 +637,7 @@ func (m *Role) Unmarshal(data []byte) error {
-			m.Name = append(m.Name[:0], data[iNdEx:postIndex]...)
+			m.Name = append(m.Name[:0], dAtA[iNdEx:postIndex]...)
@@ -652,7 +654,7 @@ func (m *Role) Unmarshal(data []byte) error {
-				b := data[iNdEx]
+				b := dAtA[iNdEx]
@@ -667,13 +669,13 @@ func (m *Role) Unmarshal(data []byte) error {
-			if err := m.KeyPermission[len(m.KeyPermission)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+			if err := m.KeyPermission[len(m.KeyPermission)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-			skippy, err := skipAuth(data[iNdEx:])
+			skippy, err := skipAuth(dAtA[iNdEx:])
@@ -692,8 +694,8 @@ func (m *Role) Unmarshal(data []byte) error {
-func skipAuth(data []byte) (n int, err error) {
-	l := len(data)
+func skipAuth(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
@@ -704,7 +706,7 @@ func skipAuth(data []byte) (n int, err error) {
-			b := data[iNdEx]
+			b := dAtA[iNdEx]
@@ -722,7 +724,7 @@ func skipAuth(data []byte) (n int, err error) {
-				if data[iNdEx-1] < 0x80 {
+				if dAtA[iNdEx-1] < 0x80 {
@@ -739,7 +741,7 @@ func skipAuth(data []byte) (n int, err error) {
-				b := data[iNdEx]
+				b := dAtA[iNdEx]
@@ -762,7 +764,7 @@ func skipAuth(data []byte) (n int, err error) {
-					b := data[iNdEx]
+					b := dAtA[iNdEx]
@@ -773,7 +775,7 @@ func skipAuth(data []byte) (n int, err error) {
-			next, err := skipAuth(data[start:])
+			next, err := skipAuth(dAtA[start:])
@@ -797,6 +799,8 @@ var (
 	ErrIntOverflowAuth = fmt.Errorf("proto: integer overflow")
 )
 
+func init() { proto.RegisterFile("auth.proto", fileDescriptorAuth) }
+
 var fileDescriptorAuth = []byte{
 	// 288 bytes of a gzipped FileDescriptorProto
 	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x6c, 0x90, 0xc1, 0x4a, 0xc3, 0x30,
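The regenerated marshaling code above only renames the internal buffer from data to dAtA and switches the compile-time assertion to ProtoPackageIsVersion2; the wire format and the public Marshal/Unmarshal API are unchanged. A minimal round-trip sketch against the vendored authpb package (the field values are invented for illustration):

package main

import (
	"fmt"

	"github.com/coreos/etcd/auth/authpb"
)

func main() {
	// Marshal a User with the regenerated code; the dAtA rename is internal only.
	u := &authpb.User{Name: []byte("alice"), Roles: []string{"root"}}
	b, err := u.Marshal()
	if err != nil {
		panic(err)
	}

	// Unmarshal it back to confirm the round trip still works after the bump.
	var out authpb.User
	if err := out.Unmarshal(b); err != nil {
		panic(err)
	}
	fmt.Println(string(out.Name), out.Roles)
}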
49  vendor/github.com/coreos/etcd/auth/range_perm_cache.go  generated vendored
@@ -49,46 +49,37 @@ func isRangeEqual(a, b *rangePerm) bool {
 
 // removeSubsetRangePerms removes any rangePerms that are subsets of other rangePerms.
 // If there are equal ranges, removeSubsetRangePerms only keeps one of them.
-func removeSubsetRangePerms(perms []*rangePerm) []*rangePerm {
-	// TODO(mitake): currently it is O(n^2), we need a better algorithm
-	newp := make([]*rangePerm, 0)
-
+// It returns a sorted rangePerm slice.
+func removeSubsetRangePerms(perms []*rangePerm) (newp []*rangePerm) {
+	sort.Sort(RangePermSliceByBegin(perms))
+	var prev *rangePerm
 	for i := range perms {
-		skip := false
-
-		for j := range perms {
-			if i == j {
-				continue
-			}
-
-			if isRangeEqual(perms[i], perms[j]) {
-				// if ranges are equal, we only keep the first range.
-				if i > j {
-					skip = true
-					break
-				}
-			} else if isSubset(perms[i], perms[j]) {
-				// if a range is a strict subset of the other one, we skip the subset.
-				skip = true
-				break
-			}
-		}
-
-		if skip {
+		if i == 0 {
+			prev = perms[i]
+			newp = append(newp, perms[i])
 			continue
 		}
+		if isRangeEqual(perms[i], prev) {
+			continue
+		}
+		if isSubset(perms[i], prev) {
+			continue
+		}
+		if isSubset(prev, perms[i]) {
+			prev = perms[i]
+			newp[len(newp)-1] = perms[i]
+			continue
+		}
+		prev = perms[i]
 		newp = append(newp, perms[i])
 	}
 
 	return newp
 }
 
 // mergeRangePerms merges adjacent rangePerms.
 func mergeRangePerms(perms []*rangePerm) []*rangePerm {
-	merged := make([]*rangePerm, 0)
+	var merged []*rangePerm
 	perms = removeSubsetRangePerms(perms)
-	sort.Sort(RangePermSliceByBegin(perms))
 
 	i := 0
 	for i < len(perms) {
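The rewrite above replaces the O(n²) pairwise subset scan with a sort by range begin followed by a single pass that compares each range only against the previous survivor. A self-contained sketch of that same sweep, using stand-in types and helpers rather than the package's rangePerm/isSubset/isRangeEqual:

package main

import (
	"fmt"
	"sort"
)

// perm is a stand-in for rangePerm; subset(a, b) reports whether a is contained in b.
type perm struct{ begin, end string }

func equal(a, b perm) bool  { return a.begin == b.begin && a.end == b.end }
func subset(a, b perm) bool { return b.begin <= a.begin && a.end <= b.end }

// removeSubsets mirrors the sorted single-pass structure of the new code:
// keep the first range, then drop equal or contained ranges and widen the
// last survivor when a later range contains it.
func removeSubsets(perms []perm) (newp []perm) {
	sort.Slice(perms, func(i, j int) bool { return perms[i].begin < perms[j].begin })
	var prev perm
	for i, p := range perms {
		if i == 0 {
			prev = p
			newp = append(newp, p)
			continue
		}
		if equal(p, prev) || subset(p, prev) {
			continue
		}
		if subset(prev, p) {
			prev = p
			newp[len(newp)-1] = p
			continue
		}
		prev = p
		newp = append(newp, p)
	}
	return newp
}

func main() {
	// Duplicates and contained ranges collapse to two survivors.
	fmt.Println(removeSubsets([]perm{{"a", "z"}, {"b", "c"}, {"a", "z"}, {"x", "zz"}}))
}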
89  vendor/github.com/coreos/etcd/auth/simple_token.go  generated vendored
@@ -20,6 +20,9 @@ package auth
 import (
 	"crypto/rand"
 	"math/big"
+	"strings"
+	"sync"
+	"time"
 )
 
 const (
@@ -27,6 +30,73 @@ const (
 	defaultSimpleTokenLength = 16
 )
 
+// var for testing purposes
+var (
+	simpleTokenTTL           = 5 * time.Minute
+	simpleTokenTTLResolution = 1 * time.Second
+)
+
+type simpleTokenTTLKeeper struct {
+	tokensMu        sync.Mutex
+	tokens          map[string]time.Time
+	stopCh          chan chan struct{}
+	deleteTokenFunc func(string)
+}
+
+func NewSimpleTokenTTLKeeper(deletefunc func(string)) *simpleTokenTTLKeeper {
+	stk := &simpleTokenTTLKeeper{
+		tokens:          make(map[string]time.Time),
+		stopCh:          make(chan chan struct{}),
+		deleteTokenFunc: deletefunc,
+	}
+	go stk.run()
+	return stk
+}
+
+func (tm *simpleTokenTTLKeeper) stop() {
+	waitCh := make(chan struct{})
+	tm.stopCh <- waitCh
+	<-waitCh
+	close(tm.stopCh)
+}
+
+func (tm *simpleTokenTTLKeeper) addSimpleToken(token string) {
+	tm.tokens[token] = time.Now().Add(simpleTokenTTL)
+}
+
+func (tm *simpleTokenTTLKeeper) resetSimpleToken(token string) {
+	if _, ok := tm.tokens[token]; ok {
+		tm.tokens[token] = time.Now().Add(simpleTokenTTL)
+	}
+}
+
+func (tm *simpleTokenTTLKeeper) deleteSimpleToken(token string) {
+	delete(tm.tokens, token)
+}
+
+func (tm *simpleTokenTTLKeeper) run() {
+	tokenTicker := time.NewTicker(simpleTokenTTLResolution)
+	defer tokenTicker.Stop()
+	for {
+		select {
+		case <-tokenTicker.C:
+			nowtime := time.Now()
+			tm.tokensMu.Lock()
+			for t, tokenendtime := range tm.tokens {
+				if nowtime.After(tokenendtime) {
+					tm.deleteTokenFunc(t)
+					delete(tm.tokens, t)
+				}
+			}
+			tm.tokensMu.Unlock()
+		case waitCh := <-tm.stopCh:
+			tm.tokens = make(map[string]time.Time)
+			waitCh <- struct{}{}
+			return
+		}
+	}
+}
+
 func (as *authStore) GenSimpleToken() (string, error) {
 	ret := make([]byte, defaultSimpleTokenLength)
 
@@ -43,6 +113,7 @@ func (as *authStore) GenSimpleToken() (string, error) {
 }
 
 func (as *authStore) assignSimpleTokenToUser(username, token string) {
+	as.simpleTokenKeeper.tokensMu.Lock()
 	as.simpleTokensMu.Lock()
 
 	_, ok := as.simpleTokens[token]
@@ -51,5 +122,23 @@ func (as *authStore) assignSimpleTokenToUser(username, token string) {
 	}
 
 	as.simpleTokens[token] = username
+	as.simpleTokenKeeper.addSimpleToken(token)
 	as.simpleTokensMu.Unlock()
+	as.simpleTokenKeeper.tokensMu.Unlock()
+}
+
+func (as *authStore) invalidateUser(username string) {
+	if as.simpleTokenKeeper == nil {
+		return
+	}
+	as.simpleTokenKeeper.tokensMu.Lock()
+	as.simpleTokensMu.Lock()
+	for token, name := range as.simpleTokens {
+		if strings.Compare(name, username) == 0 {
+			delete(as.simpleTokens, token)
+			as.simpleTokenKeeper.deleteSimpleToken(token)
+		}
+	}
+	as.simpleTokensMu.Unlock()
+	as.simpleTokenKeeper.tokensMu.Unlock()
 }
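The new simpleTokenTTLKeeper gives simple auth tokens a sliding lifetime: assignment registers the token, every successful lookup resets its deadline, and a once-per-second ticker evicts expired entries through an injected callback so the auth store can drop the matching user mapping. A rough standalone sketch of that pattern (names and durations are illustrative, not the etcd API, and the stop channel is omitted):

package main

import (
	"fmt"
	"sync"
	"time"
)

// ttlKeeper mirrors the pattern above: an expiry map guarded by a mutex, a
// background ticker that evicts stale entries, and a callback so the owner
// can invalidate whatever the token maps to.
type ttlKeeper struct {
	mu      sync.Mutex
	ttl     time.Duration
	expires map[string]time.Time
	onEvict func(string)
}

func newTTLKeeper(ttl time.Duration, onEvict func(string)) *ttlKeeper {
	k := &ttlKeeper{ttl: ttl, expires: make(map[string]time.Time), onEvict: onEvict}
	go k.run(time.Second)
	return k
}

func (k *ttlKeeper) add(token string) {
	k.mu.Lock()
	k.expires[token] = time.Now().Add(k.ttl)
	k.mu.Unlock()
}

func (k *ttlKeeper) reset(token string) {
	k.mu.Lock()
	if _, ok := k.expires[token]; ok {
		k.expires[token] = time.Now().Add(k.ttl)
	}
	k.mu.Unlock()
}

func (k *ttlKeeper) run(resolution time.Duration) {
	ticker := time.NewTicker(resolution)
	defer ticker.Stop()
	for now := range ticker.C {
		k.mu.Lock()
		for token, deadline := range k.expires {
			if now.After(deadline) {
				k.onEvict(token)
				delete(k.expires, token)
			}
		}
		k.mu.Unlock()
	}
}

func main() {
	k := newTTLKeeper(2*time.Second, func(token string) { fmt.Println("evicted", token) })
	k.add("simpletoken.7")
	time.Sleep(3 * time.Second) // long enough for the ticker to evict the token
}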
341  vendor/github.com/coreos/etcd/auth/store.go  generated vendored
@@ -16,9 +16,11 @@ package auth
 
 import (
 	"bytes"
+	"encoding/binary"
 	"errors"
 	"fmt"
 	"sort"
+	"strconv"
 	"strings"
 	"sync"
 
@@ -28,6 +30,7 @@ import (
 	"github.com/coreos/pkg/capnslog"
 	"golang.org/x/crypto/bcrypt"
 	"golang.org/x/net/context"
+	"google.golang.org/grpc/metadata"
 )
 
 var (
@@ -35,6 +38,8 @@ var (
 	authEnabled  = []byte{1}
 	authDisabled = []byte{0}
 
+	revisionKey = []byte("authRevision")
+
 	authBucketName      = []byte("auth")
 	authUsersBucketName = []byte("authUsers")
 	authRolesBucketName = []byte("authRoles")
@@ -44,6 +49,7 @@ var (
 	ErrRootUserNotExist = errors.New("auth: root user does not exist")
 	ErrRootRoleNotExist = errors.New("auth: root user does not have root role")
 	ErrUserAlreadyExist = errors.New("auth: user already exists")
+	ErrUserEmpty        = errors.New("auth: user name is empty")
 	ErrUserNotFound     = errors.New("auth: user not found")
 	ErrRoleAlreadyExist = errors.New("auth: role already exists")
 	ErrRoleNotFound     = errors.New("auth: role not found")
@@ -51,13 +57,26 @@ var (
 	ErrPermissionDenied     = errors.New("auth: permission denied")
 	ErrRoleNotGranted       = errors.New("auth: role is not granted to the user")
 	ErrPermissionNotGranted = errors.New("auth: permission is not granted to the role")
+	ErrAuthNotEnabled       = errors.New("auth: authentication is not enabled")
+	ErrAuthOldRevision      = errors.New("auth: revision in header is old")
+	ErrInvalidAuthToken     = errors.New("auth: invalid auth token")
+
+	// BcryptCost is the algorithm cost / strength for hashing auth passwords
+	BcryptCost = bcrypt.DefaultCost
 )
 
 const (
 	rootUser = "root"
 	rootRole = "root"
+
+	revBytesLen = 8
 )
 
+type AuthInfo struct {
+	Username string
+	Revision uint64
+}
+
 type AuthStore interface {
 	// AuthEnable turns on the authentication feature
 	AuthEnable() error
@@ -110,23 +129,36 @@ type AuthStore interface {
 	// RoleList gets a list of all roles
 	RoleList(r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error)
 
-	// UsernameFromToken gets a username from the given Token
-	UsernameFromToken(token string) (string, bool)
+	// AuthInfoFromToken gets a username from the given Token and current revision number
+	// (The revision number is used for preventing the TOCTOU problem)
+	AuthInfoFromToken(token string) (*AuthInfo, bool)
 
 	// IsPutPermitted checks put permission of the user
-	IsPutPermitted(username string, key []byte) bool
+	IsPutPermitted(authInfo *AuthInfo, key []byte) error
 
 	// IsRangePermitted checks range permission of the user
-	IsRangePermitted(username string, key, rangeEnd []byte) bool
+	IsRangePermitted(authInfo *AuthInfo, key, rangeEnd []byte) error
 
 	// IsDeleteRangePermitted checks delete-range permission of the user
-	IsDeleteRangePermitted(username string, key, rangeEnd []byte) bool
+	IsDeleteRangePermitted(authInfo *AuthInfo, key, rangeEnd []byte) error
 
 	// IsAdminPermitted checks admin permission of the user
-	IsAdminPermitted(username string) bool
+	IsAdminPermitted(authInfo *AuthInfo) error
 
 	// GenSimpleToken produces a simple random string
 	GenSimpleToken() (string, error)
+
+	// Revision gets current revision of authStore
+	Revision() uint64
+
+	// CheckPassword checks a given pair of username and password is correct
+	CheckPassword(username, password string) (uint64, error)
+
+	// Close does cleanup of AuthStore
+	Close() error
+
+	// AuthInfoFromCtx gets AuthInfo from gRPC's context
+	AuthInfoFromCtx(ctx context.Context) (*AuthInfo, error)
 }
@@ -136,11 +168,33 @@ type authStore struct {
 
 	rangePermCache map[string]*unifiedRangePermissions // username -> unifiedRangePermissions
 
-	simpleTokensMu sync.RWMutex
-	simpleTokens   map[string]string // token -> username
+	revision uint64
+
+	// tokenSimple in v3.2+
+	indexWaiter       func(uint64) <-chan struct{}
+	simpleTokenKeeper *simpleTokenTTLKeeper
+	simpleTokensMu    sync.Mutex
+	simpleTokens      map[string]string // token -> username
+}
+
+func newDeleterFunc(as *authStore) func(string) {
+	return func(t string) {
+		as.simpleTokensMu.Lock()
+		defer as.simpleTokensMu.Unlock()
+		if username, ok := as.simpleTokens[t]; ok {
+			plog.Infof("deleting token %s for user %s", t, username)
+			delete(as.simpleTokens, t)
+		}
+	}
 }
 
 func (as *authStore) AuthEnable() error {
+	as.enabledMu.Lock()
+	defer as.enabledMu.Unlock()
+	if as.enabled {
+		plog.Noticef("Authentication already enabled")
+		return nil
+	}
 	b := as.be
 	tx := b.BatchTx()
 	tx.Lock()
@@ -160,33 +214,64 @@ func (as *authStore) AuthEnable() error {
 
 	tx.UnsafePut(authBucketName, enableFlagKey, authEnabled)
 
-	as.enabledMu.Lock()
 	as.enabled = true
-	as.enabledMu.Unlock()
+	as.simpleTokenKeeper = NewSimpleTokenTTLKeeper(newDeleterFunc(as))
 
 	as.rangePermCache = make(map[string]*unifiedRangePermissions)
 
+	as.revision = getRevision(tx)
+
 	plog.Noticef("Authentication enabled")
 
 	return nil
 }
 
 func (as *authStore) AuthDisable() {
+	as.enabledMu.Lock()
+	defer as.enabledMu.Unlock()
+	if !as.enabled {
+		return
+	}
 	b := as.be
 	tx := b.BatchTx()
 	tx.Lock()
 	tx.UnsafePut(authBucketName, enableFlagKey, authDisabled)
+	as.commitRevision(tx)
 	tx.Unlock()
 	b.ForceCommit()
 
-	as.enabledMu.Lock()
 	as.enabled = false
-	as.enabledMu.Unlock()
+
+	as.simpleTokensMu.Lock()
+	as.simpleTokens = make(map[string]string) // invalidate all tokens
+	as.simpleTokensMu.Unlock()
+	if as.simpleTokenKeeper != nil {
+		as.simpleTokenKeeper.stop()
+		as.simpleTokenKeeper = nil
+	}
 
 	plog.Noticef("Authentication disabled")
 }
 
+func (as *authStore) Close() error {
+	as.enabledMu.Lock()
+	defer as.enabledMu.Unlock()
+	if !as.enabled {
+		return nil
+	}
+	if as.simpleTokenKeeper != nil {
+		as.simpleTokenKeeper.stop()
+		as.simpleTokenKeeper = nil
+	}
+	return nil
+}
+
 func (as *authStore) Authenticate(ctx context.Context, username, password string) (*pb.AuthenticateResponse, error) {
+	if !as.isAuthEnabled() {
+		return nil, ErrAuthNotEnabled
+	}
+
 	// TODO(mitake): after adding jwt support, branching based on values of ctx is required
 	index := ctx.Value("index").(uint64)
 	simpleToken := ctx.Value("simpleToken").(string)
@@ -200,11 +285,6 @@ func (as *authStore) Authenticate(ctx context.Context, username, password string) (*pb.AuthenticateResponse, error) {
 		return nil, ErrAuthFailed
 	}
 
-	if bcrypt.CompareHashAndPassword(user.Password, []byte(password)) != nil {
-		plog.Noticef("authentication failed, invalid password for user %s", username)
-		return &pb.AuthenticateResponse{}, ErrAuthFailed
-	}
-
 	token := fmt.Sprintf("%s.%d", simpleToken, index)
 	as.assignSimpleTokenToUser(username, token)
 
@@ -212,6 +292,24 @@ func (as *authStore) Authenticate(ctx context.Context, username, password string) (*pb.AuthenticateResponse, error) {
 	return &pb.AuthenticateResponse{Token: token}, nil
 }
 
+func (as *authStore) CheckPassword(username, password string) (uint64, error) {
+	tx := as.be.BatchTx()
+	tx.Lock()
+	defer tx.Unlock()
+
+	user := getUser(tx, username)
+	if user == nil {
+		return 0, ErrAuthFailed
+	}
+
+	if bcrypt.CompareHashAndPassword(user.Password, []byte(password)) != nil {
+		plog.Noticef("authentication failed, invalid password for user %s", username)
+		return 0, ErrAuthFailed
+	}
+
+	return getRevision(tx), nil
+}
+
 func (as *authStore) Recover(be backend.Backend) {
 	enabled := false
 	as.be = be
@@ -223,6 +321,9 @@ func (as *authStore) Recover(be backend.Backend) {
 			enabled = true
 		}
 	}
+
+	as.revision = getRevision(tx)
+
 	tx.Unlock()
 
 	as.enabledMu.Lock()
@@ -231,7 +332,11 @@ func (as *authStore) Recover(be backend.Backend) {
 }
 
 func (as *authStore) UserAdd(r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) {
-	hashed, err := bcrypt.GenerateFromPassword([]byte(r.Password), bcrypt.DefaultCost)
+	if len(r.Name) == 0 {
+		return nil, ErrUserEmpty
+	}
+
+	hashed, err := bcrypt.GenerateFromPassword([]byte(r.Password), BcryptCost)
 	if err != nil {
 		plog.Errorf("failed to hash password: %s", err)
 		return nil, err
@@ -253,6 +358,8 @@ func (as *authStore) UserAdd(r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) {
 
 	putUser(tx, newUser)
 
+	as.commitRevision(tx)
+
 	plog.Noticef("added a new user: %s", r.Name)
 
 	return &pb.AuthUserAddResponse{}, nil
@@ -270,6 +377,11 @@ func (as *authStore) UserDelete(r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) {
 
 	delUser(tx, r.Name)
 
+	as.commitRevision(tx)
+
+	as.invalidateCachedPerm(r.Name)
+	as.invalidateUser(r.Name)
+
 	plog.Noticef("deleted a user: %s", r.Name)
 
 	return &pb.AuthUserDeleteResponse{}, nil
@@ -278,7 +390,7 @@ func (as *authStore) UserDelete(r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) {
 func (as *authStore) UserChangePassword(r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) {
 	// TODO(mitake): measure the cost of bcrypt.GenerateFromPassword()
 	// If the cost is too high, we should move the encryption to outside of the raft
-	hashed, err := bcrypt.GenerateFromPassword([]byte(r.Password), bcrypt.DefaultCost)
+	hashed, err := bcrypt.GenerateFromPassword([]byte(r.Password), BcryptCost)
 	if err != nil {
 		plog.Errorf("failed to hash password: %s", err)
 		return nil, err
@@ -301,6 +413,11 @@ func (as *authStore) UserChangePassword(r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) {
 
 	putUser(tx, updatedUser)
 
+	as.commitRevision(tx)
+
+	as.invalidateCachedPerm(r.Name)
+	as.invalidateUser(r.Name)
+
 	plog.Noticef("changed a password of a user: %s", r.Name)
 
 	return &pb.AuthUserChangePasswordResponse{}, nil
@@ -336,6 +453,8 @@ func (as *authStore) UserGrantRole(r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) {
 
 	as.invalidateCachedPerm(r.User)
 
+	as.commitRevision(tx)
+
 	plog.Noticef("granted role %s to user %s", r.Role, r.User)
 	return &pb.AuthUserGrantRoleResponse{}, nil
 }
@@ -351,11 +470,7 @@ func (as *authStore) UserGet(r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) {
 	if user == nil {
 		return nil, ErrUserNotFound
 	}
-
-	for _, role := range user.Roles {
-		resp.Roles = append(resp.Roles, role)
-	}
-
+	resp.Roles = append(resp.Roles, user.Roles...)
 	return &resp, nil
 }
 
@@ -404,6 +519,8 @@ func (as *authStore) UserRevokeRole(r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) {
 
 	as.invalidateCachedPerm(r.Name)
 
+	as.commitRevision(tx)
+
 	plog.Noticef("revoked role %s from user %s", r.Role, r.Name)
 	return &pb.AuthUserRevokeRoleResponse{}, nil
 }
@@ -419,11 +536,7 @@ func (as *authStore) RoleGet(r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) {
 	if role == nil {
 		return nil, ErrRoleNotFound
 	}
-
-	for _, perm := range role.KeyPermission {
-		resp.Perm = append(resp.Perm, perm)
-	}
-
+	resp.Perm = append(resp.Perm, role.KeyPermission...)
 	return &resp, nil
 }
 
@@ -473,6 +586,8 @@ func (as *authStore) RoleRevokePermission(r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) {
 	// It should be optimized.
 	as.clearCachedPerm()
 
+	as.commitRevision(tx)
+
 	plog.Noticef("revoked key %s from role %s", r.Key, r.Role)
 	return &pb.AuthRoleRevokePermissionResponse{}, nil
 }
@@ -501,6 +616,8 @@ func (as *authStore) RoleDelete(r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) {
 
 	delRole(tx, r.Role)
 
+	as.commitRevision(tx)
+
 	plog.Noticef("deleted role %s", r.Role)
 	return &pb.AuthRoleDeleteResponse{}, nil
 }
@@ -521,16 +638,24 @@ func (as *authStore) RoleAdd(r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) {
 
 	putRole(tx, newRole)
 
+	as.commitRevision(tx)
+
 	plog.Noticef("Role %s is created", r.Name)
 
 	return &pb.AuthRoleAddResponse{}, nil
 }
 
-func (as *authStore) UsernameFromToken(token string) (string, bool) {
-	as.simpleTokensMu.RLock()
-	defer as.simpleTokensMu.RUnlock()
-	t, ok := as.simpleTokens[token]
-	return t, ok
+func (as *authStore) AuthInfoFromToken(token string) (*AuthInfo, bool) {
+	// same as '(t *tokenSimple) info' in v3.2+
+	as.simpleTokenKeeper.tokensMu.Lock()
+	as.simpleTokensMu.Lock()
+	username, ok := as.simpleTokens[token]
+	if ok {
+		as.simpleTokenKeeper.resetSimpleToken(token)
+	}
+	as.simpleTokensMu.Unlock()
+	as.simpleTokenKeeper.tokensMu.Unlock()
+	return &AuthInfo{Username: username, Revision: as.revision}, ok
 }
 
 type permSlice []*authpb.Permission
@@ -582,15 +707,26 @@ func (as *authStore) RoleGrantPermission(r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) {
 	// It should be optimized.
 	as.clearCachedPerm()
 
+	as.commitRevision(tx)
+
 	plog.Noticef("role %s's permission of key %s is updated as %s", r.Name, r.Perm.Key, authpb.Permission_Type_name[int32(r.Perm.PermType)])
 
 	return &pb.AuthRoleGrantPermissionResponse{}, nil
 }
 
-func (as *authStore) isOpPermitted(userName string, key, rangeEnd []byte, permTyp authpb.Permission_Type) bool {
+func (as *authStore) isOpPermitted(userName string, revision uint64, key, rangeEnd []byte, permTyp authpb.Permission_Type) error {
 	// TODO(mitake): this function would be costly so we need a caching mechanism
 	if !as.isAuthEnabled() {
-		return true
+		return nil
+	}
+
+	// only gets rev == 0 when passed AuthInfo{}; no user given
+	if revision == 0 {
+		return ErrUserEmpty
+	}
+
+	if revision < as.revision {
+		return ErrAuthOldRevision
 	}
 
 	tx := as.be.BatchTx()
@@ -600,48 +736,55 @@ func (as *authStore) isOpPermitted(userName string, key, rangeEnd []byte, permTyp authpb.Permission_Type) bool {
 	user := getUser(tx, userName)
 	if user == nil {
 		plog.Errorf("invalid user name %s for permission checking", userName)
-		return false
+		return ErrPermissionDenied
 	}
 
 	// root role should have permission on all ranges
 	if hasRootRole(user) {
-		return true
+		return nil
 	}
 
 	if as.isRangeOpPermitted(tx, userName, key, rangeEnd, permTyp) {
-		return true
+		return nil
 	}
 
-	return false
+	return ErrPermissionDenied
 }
 
-func (as *authStore) IsPutPermitted(username string, key []byte) bool {
-	return as.isOpPermitted(username, key, nil, authpb.WRITE)
+func (as *authStore) IsPutPermitted(authInfo *AuthInfo, key []byte) error {
+	return as.isOpPermitted(authInfo.Username, authInfo.Revision, key, nil, authpb.WRITE)
 }
 
-func (as *authStore) IsRangePermitted(username string, key, rangeEnd []byte) bool {
-	return as.isOpPermitted(username, key, rangeEnd, authpb.READ)
+func (as *authStore) IsRangePermitted(authInfo *AuthInfo, key, rangeEnd []byte) error {
	return as.isOpPermitted(authInfo.Username, authInfo.Revision, key, rangeEnd, authpb.READ)
 }
 
-func (as *authStore) IsDeleteRangePermitted(username string, key, rangeEnd []byte) bool {
-	return as.isOpPermitted(username, key, rangeEnd, authpb.WRITE)
+func (as *authStore) IsDeleteRangePermitted(authInfo *AuthInfo, key, rangeEnd []byte) error {
+	return as.isOpPermitted(authInfo.Username, authInfo.Revision, key, rangeEnd, authpb.WRITE)
 }
 
-func (as *authStore) IsAdminPermitted(username string) bool {
+func (as *authStore) IsAdminPermitted(authInfo *AuthInfo) error {
 	if !as.isAuthEnabled() {
-		return true
+		return nil
+	}
+	if authInfo == nil {
+		return ErrUserEmpty
 	}
 
 	tx := as.be.BatchTx()
 	tx.Lock()
 	defer tx.Unlock()
 
-	u := getUser(tx, username)
+	u := getUser(tx, authInfo.Username)
 	if u == nil {
-		return false
+		return ErrUserNotFound
 	}
 
-	return hasRootRole(u)
+	if !hasRootRole(u) {
+		return ErrPermissionDenied
+	}
+
+	return nil
 }
 
 func getUser(tx backend.BatchTx, username string) *authpb.User {
@@ -745,7 +888,7 @@ func (as *authStore) isAuthEnabled() bool {
 	return as.enabled
 }
 
-func NewAuthStore(be backend.Backend) *authStore {
+func NewAuthStore(be backend.Backend, indexWaiter func(uint64) <-chan struct{}) *authStore {
 	tx := be.BatchTx()
 	tx.Lock()
 
@@ -753,13 +896,35 @@ func NewAuthStore(be backend.Backend) *authStore {
 	tx.UnsafeCreateBucket(authUsersBucketName)
 	tx.UnsafeCreateBucket(authRolesBucketName)
 
+	enabled := false
+	_, vs := tx.UnsafeRange(authBucketName, enableFlagKey, nil, 0)
+	if len(vs) == 1 {
+		if bytes.Equal(vs[0], authEnabled) {
+			enabled = true
+		}
+	}
+
+	as := &authStore{
+		be:             be,
+		simpleTokens:   make(map[string]string),
+		revision:       getRevision(tx),
+		indexWaiter:    indexWaiter,
+		enabled:        enabled,
+		rangePermCache: make(map[string]*unifiedRangePermissions),
+	}
+
+	if enabled {
+		as.simpleTokenKeeper = NewSimpleTokenTTLKeeper(newDeleterFunc(as))
+	}
+
+	if as.revision == 0 {
+		as.commitRevision(tx)
+	}
+
 	tx.Unlock()
 	be.ForceCommit()
 
-	return &authStore{
-		be:           be,
-		simpleTokens: make(map[string]string),
-	}
+	return as
 }
 
 func hasRootRole(u *authpb.User) bool {
@@ -770,3 +935,67 @@ func hasRootRole(u *authpb.User) bool {
 	}
 	return false
 }
+
+func (as *authStore) commitRevision(tx backend.BatchTx) {
+	as.revision++
+	revBytes := make([]byte, revBytesLen)
+	binary.BigEndian.PutUint64(revBytes, as.revision)
+	tx.UnsafePut(authBucketName, revisionKey, revBytes)
+}
+
+func getRevision(tx backend.BatchTx) uint64 {
+	_, vs := tx.UnsafeRange(authBucketName, []byte(revisionKey), nil, 0)
+	if len(vs) != 1 {
+		// this can happen in the initialization phase
+		return 0
+	}
+
+	return binary.BigEndian.Uint64(vs[0])
+}
+
+func (as *authStore) Revision() uint64 {
+	return as.revision
+}
+
+func (as *authStore) isValidSimpleToken(token string, ctx context.Context) bool {
+	splitted := strings.Split(token, ".")
+	if len(splitted) != 2 {
+		return false
+	}
+	index, err := strconv.Atoi(splitted[1])
+	if err != nil {
+		return false
+	}
+
+	select {
+	case <-as.indexWaiter(uint64(index)):
+		return true
+	case <-ctx.Done():
+	}
+
+	return false
+}
+
+func (as *authStore) AuthInfoFromCtx(ctx context.Context) (*AuthInfo, error) {
+	md, ok := metadata.FromContext(ctx)
+	if !ok {
+		return nil, nil
+	}
+
+	ts, tok := md["token"]
+	if !tok {
+		return nil, nil
+	}
+
+	token := ts[0]
+	if !as.isValidSimpleToken(token, ctx) {
+		return nil, ErrInvalidAuthToken
+	}
+
+	authInfo, uok := as.AuthInfoFromToken(token)
+	if !uok {
+		plog.Warningf("invalid auth token: %s", token)
+		return nil, ErrInvalidAuthToken
+	}
+	return authInfo, nil
+}
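Taken together, the store changes above thread a persisted auth revision through every check: each mutating RPC calls commitRevision, AuthInfoFromToken stamps the caller's AuthInfo with the revision current at token validation, and isOpPermitted rejects an older revision with ErrAuthOldRevision so permissions revoked between validation and execution cannot be exercised (the TOCTOU problem the interface comment mentions). A compressed sketch of just that guard, with stand-in types rather than the vendored ones:

package main

import (
	"errors"
	"fmt"
)

var (
	errUserEmpty       = errors.New("auth: user name is empty")
	errAuthOldRevision = errors.New("auth: revision in header is old")
)

// authInfo and store are stand-ins for the vendored AuthInfo / authStore.
type authInfo struct {
	username string
	revision uint64
}

type store struct{ revision uint64 }

func (s *store) checkOpRevision(ai *authInfo) error {
	if ai == nil || ai.revision == 0 {
		return errUserEmpty // the request carried no validated AuthInfo
	}
	if ai.revision < s.revision {
		// auth state changed after the token was validated; caller must retry
		return errAuthOldRevision
	}
	return nil
}

func main() {
	s := &store{revision: 5}
	fmt.Println(s.checkOpRevision(&authInfo{username: "alice", revision: 4})) // stale
	fmt.Println(s.checkOpRevision(&authInfo{username: "alice", revision: 5})) // ok: <nil>
}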
2 vendor/github.com/coreos/etcd/client/README.md generated vendored
@@ -114,4 +114,4 @@ if err != nil {

 3. Default etcd/client cannot handle the case that the remote server is SIGSTOPed now. TCP keepalive mechanism doesn't help in this scenario because operating system may still send TCP keep-alive packets. Over time we'd like to improve this functionality, but solving this issue isn't high priority because a real-life case in which a server is stopped, but the connection is kept alive, hasn't been brought to our attention.

-4. etcd/client cannot detect whether the member in use is healthy when doing read requests. If the member is isolated from the cluster, etcd/client may retrieve outdated data. As a workaround, users could monitor experimental /health endpoint for member healthy information. We are improving it at [#3265](https://github.com/coreos/etcd/issues/3265).
+4. etcd/client cannot detect whether a member is healthy with watches and non-quorum read requests. If the member is isolated from the cluster, etcd/client may retrieve outdated data. Instead, users can either issue quorum read requests or monitor the /health endpoint for member health information.
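Illustrative only (not part of the vendored diff): the quorum-read mitigation described in point 4 above, using the v2 client's GetOptions; the endpoint URL and key are placeholders.

```go
package main

import (
	"fmt"
	"log"

	"github.com/coreos/etcd/client"
	"golang.org/x/net/context"
)

func main() {
	c, err := client.New(client.Config{Endpoints: []string{"http://127.0.0.1:2379"}})
	if err != nil {
		log.Fatal(err)
	}
	kapi := client.NewKeysAPI(c)

	// Quorum forces the read through the leader, so an isolated member
	// cannot serve stale data.
	resp, err := kapi.Get(context.Background(), "/foo", &client.GetOptions{Quorum: true})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(resp.Node.Value)
}
```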
147 vendor/github.com/coreos/etcd/client/client.go generated vendored
@@ -22,7 +22,6 @@ import (
|
|||||||
"net"
|
"net"
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/url"
|
"net/url"
|
||||||
"reflect"
|
|
||||||
"sort"
|
"sort"
|
||||||
"strconv"
|
"strconv"
|
||||||
"sync"
|
"sync"
|
||||||
@@ -261,53 +260,67 @@ type httpClusterClient struct {
|
|||||||
selectionMode EndpointSelectionMode
|
selectionMode EndpointSelectionMode
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *httpClusterClient) getLeaderEndpoint() (string, error) {
|
func (c *httpClusterClient) getLeaderEndpoint(ctx context.Context, eps []url.URL) (string, error) {
|
||||||
mAPI := NewMembersAPI(c)
|
ceps := make([]url.URL, len(eps))
|
||||||
leader, err := mAPI.Leader(context.Background())
|
copy(ceps, eps)
|
||||||
|
|
||||||
|
// To perform a lookup on the new endpoint list without using the current
|
||||||
|
// client, we'll copy it
|
||||||
|
clientCopy := &httpClusterClient{
|
||||||
|
clientFactory: c.clientFactory,
|
||||||
|
credentials: c.credentials,
|
||||||
|
rand: c.rand,
|
||||||
|
|
||||||
|
pinned: 0,
|
||||||
|
endpoints: ceps,
|
||||||
|
}
|
||||||
|
|
||||||
|
mAPI := NewMembersAPI(clientCopy)
|
||||||
|
leader, err := mAPI.Leader(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", err
|
return "", err
|
||||||
}
|
}
|
||||||
|
if len(leader.ClientURLs) == 0 {
|
||||||
|
return "", ErrNoLeaderEndpoint
|
||||||
|
}
|
||||||
|
|
||||||
return leader.ClientURLs[0], nil // TODO: how to handle multiple client URLs?
|
return leader.ClientURLs[0], nil // TODO: how to handle multiple client URLs?
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *httpClusterClient) SetEndpoints(eps []string) error {
|
func (c *httpClusterClient) parseEndpoints(eps []string) ([]url.URL, error) {
|
||||||
if len(eps) == 0 {
|
if len(eps) == 0 {
|
||||||
return ErrNoEndpoints
|
return []url.URL{}, ErrNoEndpoints
|
||||||
}
|
}
|
||||||
|
|
||||||
neps := make([]url.URL, len(eps))
|
neps := make([]url.URL, len(eps))
|
||||||
for i, ep := range eps {
|
for i, ep := range eps {
|
||||||
u, err := url.Parse(ep)
|
u, err := url.Parse(ep)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return []url.URL{}, err
|
||||||
}
|
}
|
||||||
neps[i] = *u
|
neps[i] = *u
|
||||||
}
|
}
|
||||||
|
return neps, nil
|
||||||
|
}
|
||||||
|
|
||||||
switch c.selectionMode {
|
func (c *httpClusterClient) SetEndpoints(eps []string) error {
|
||||||
case EndpointSelectionRandom:
|
neps, err := c.parseEndpoints(eps)
|
||||||
c.endpoints = shuffleEndpoints(c.rand, neps)
|
if err != nil {
|
||||||
c.pinned = 0
|
return err
|
||||||
case EndpointSelectionPrioritizeLeader:
|
|
||||||
c.endpoints = neps
|
|
||||||
lep, err := c.getLeaderEndpoint()
|
|
||||||
if err != nil {
|
|
||||||
return ErrNoLeaderEndpoint
|
|
||||||
}
|
|
||||||
|
|
||||||
for i := range c.endpoints {
|
|
||||||
if c.endpoints[i].String() == lep {
|
|
||||||
c.pinned = i
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// If endpoints doesn't have the lu, just keep c.pinned = 0.
|
|
||||||
// Forwarding between follower and leader would be required but it works.
|
|
||||||
default:
|
|
||||||
return errors.New(fmt.Sprintf("invalid endpoint selection mode: %d", c.selectionMode))
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
c.Lock()
|
||||||
|
defer c.Unlock()
|
||||||
|
|
||||||
|
c.endpoints = shuffleEndpoints(c.rand, neps)
|
||||||
|
// We're not doing anything for PrioritizeLeader here. This is
|
||||||
|
// due to not having a context meaning we can't call getLeaderEndpoint
|
||||||
|
// However, if you're using PrioritizeLeader, you've already been told
|
||||||
|
// to regularly call sync, where we do have a ctx, and can figure the
|
||||||
|
// leader. PrioritizeLeader is also quite a loose guarantee, so deal
|
||||||
|
// with it
|
||||||
|
c.pinned = 0
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -401,27 +414,51 @@ func (c *httpClusterClient) Sync(ctx context.Context) error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
c.Lock()
|
var eps []string
|
||||||
defer c.Unlock()
|
|
||||||
|
|
||||||
eps := make([]string, 0)
|
|
||||||
for _, m := range ms {
|
for _, m := range ms {
|
||||||
eps = append(eps, m.ClientURLs...)
|
eps = append(eps, m.ClientURLs...)
|
||||||
}
|
}
|
||||||
sort.Sort(sort.StringSlice(eps))
|
|
||||||
|
|
||||||
ceps := make([]string, len(c.endpoints))
|
neps, err := c.parseEndpoints(eps)
|
||||||
for i, cep := range c.endpoints {
|
if err != nil {
|
||||||
ceps[i] = cep.String()
|
return err
|
||||||
}
|
|
||||||
sort.Sort(sort.StringSlice(ceps))
|
|
||||||
// fast path if no change happens
|
|
||||||
// this helps client to pin the endpoint when no cluster change
|
|
||||||
if reflect.DeepEqual(eps, ceps) {
|
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return c.SetEndpoints(eps)
|
npin := 0
|
||||||
|
|
||||||
|
switch c.selectionMode {
|
||||||
|
case EndpointSelectionRandom:
|
||||||
|
c.RLock()
|
||||||
|
eq := endpointsEqual(c.endpoints, neps)
|
||||||
|
c.RUnlock()
|
||||||
|
|
||||||
|
if eq {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
// When items in the endpoint list changes, we choose a new pin
|
||||||
|
neps = shuffleEndpoints(c.rand, neps)
|
||||||
|
case EndpointSelectionPrioritizeLeader:
|
||||||
|
nle, err := c.getLeaderEndpoint(ctx, neps)
|
||||||
|
if err != nil {
|
||||||
|
return ErrNoLeaderEndpoint
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, n := range neps {
|
||||||
|
if n.String() == nle {
|
||||||
|
npin = i
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
return fmt.Errorf("invalid endpoint selection mode: %d", c.selectionMode)
|
||||||
|
}
|
||||||
|
|
||||||
|
c.Lock()
|
||||||
|
defer c.Unlock()
|
||||||
|
c.endpoints = neps
|
||||||
|
c.pinned = npin
|
||||||
|
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *httpClusterClient) AutoSync(ctx context.Context, interval time.Duration) error {
|
func (c *httpClusterClient) AutoSync(ctx context.Context, interval time.Duration) error {
|
||||||
@@ -607,3 +644,27 @@ func shuffleEndpoints(r *rand.Rand, eps []url.URL) []url.URL {
|
|||||||
}
|
}
|
||||||
return neps
|
return neps
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func endpointsEqual(left, right []url.URL) bool {
|
||||||
|
if len(left) != len(right) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
sLeft := make([]string, len(left))
|
||||||
|
sRight := make([]string, len(right))
|
||||||
|
for i, l := range left {
|
||||||
|
sLeft[i] = l.String()
|
||||||
|
}
|
||||||
|
for i, r := range right {
|
||||||
|
sRight[i] = r.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
sort.Strings(sLeft)
|
||||||
|
sort.Strings(sRight)
|
||||||
|
for i := range sLeft {
|
||||||
|
if sLeft[i] != sRight[i] {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|||||||
6 vendor/github.com/coreos/etcd/client/cluster_error.go generated vendored
@@ -21,7 +21,11 @@ type ClusterError struct {
 }

 func (ce *ClusterError) Error() string {
-	return ErrClusterUnavailable.Error()
+	s := ErrClusterUnavailable.Error()
+	for i, e := range ce.Errors {
+		s += fmt.Sprintf("; error #%d: %s\n", i, e)
+	}
+	return s
 }

 func (ce *ClusterError) Detail() string {
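A hedged sketch (not part of the diff) of how callers can surface the richer ClusterError output above; the helper name is made up for illustration.

```go
package example

import (
	"log"

	"github.com/coreos/etcd/client"
)

// logClusterError is a hypothetical helper: with the change above,
// Error() now appends every per-endpoint error instead of only the
// generic "cluster is unavailable" message, and Detail() lists them too.
func logClusterError(err error) {
	if cerr, ok := err.(*client.ClusterError); ok {
		log.Printf("all endpoints failed: %s", cerr.Detail())
		return
	}
	log.Printf("request failed: %v", err)
}
```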
675 vendor/github.com/coreos/etcd/client/keys.generated.go generated vendored
File diff suppressed because it is too large.
32 vendor/github.com/coreos/etcd/client/keys.go generated vendored
@@ -191,6 +191,10 @@ type SetOptions struct {

 	// Dir specifies whether or not this Node should be created as a directory.
 	Dir bool
+
+	// NoValueOnSuccess specifies whether the response contains the current value of the Node.
+	// If set, the response will only contain the current value when the request fails.
+	NoValueOnSuccess bool
 }

 type GetOptions struct {
@@ -268,6 +272,10 @@ type Response struct {
 	// Index holds the cluster-level index at the time the Response was generated.
 	// This index is not tied to the Node(s) contained in this Response.
 	Index uint64 `json:"-"`
+
+	// ClusterID holds the cluster-level ID reported by the server. This
+	// should be different for different etcd clusters.
+	ClusterID string `json:"-"`
 }

 type Node struct {
@@ -335,6 +343,7 @@ func (k *httpKeysAPI) Set(ctx context.Context, key, val string, opts *SetOptions
 		act.TTL = opts.TTL
 		act.Refresh = opts.Refresh
 		act.Dir = opts.Dir
+		act.NoValueOnSuccess = opts.NoValueOnSuccess
 	}

 	doCtx := ctx
@@ -523,15 +532,16 @@ func (w *waitAction) HTTPRequest(ep url.URL) *http.Request {
 }

 type setAction struct {
 	Prefix string
 	Key string
 	Value string
 	PrevValue string
 	PrevIndex uint64
 	PrevExist PrevExistType
 	TTL time.Duration
 	Refresh bool
 	Dir bool
+	NoValueOnSuccess bool
 }

 func (a *setAction) HTTPRequest(ep url.URL) *http.Request {
@@ -565,6 +575,9 @@ func (a *setAction) HTTPRequest(ep url.URL) *http.Request {
 	if a.Refresh {
 		form.Add("refresh", "true")
 	}
+	if a.NoValueOnSuccess {
+		params.Set("noValueOnSuccess", strconv.FormatBool(a.NoValueOnSuccess))
+	}

 	u.RawQuery = params.Encode()
 	body := strings.NewReader(form.Encode())
@@ -656,6 +669,7 @@ func unmarshalSuccessfulKeysResponse(header http.Header, body []byte) (*Response
 			return nil, err
 		}
 	}
+	res.ClusterID = header.Get("X-Etcd-Cluster-ID")
 	return &res, nil
 }

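Illustrative usage (not part of the diff) of the two additions above, NoValueOnSuccess on SetOptions and ClusterID on Response; key and value are placeholders.

```go
package example

import (
	"log"

	"github.com/coreos/etcd/client"
	"golang.org/x/net/context"
)

func setQuiet(kapi client.KeysAPI) {
	// NoValueOnSuccess asks the server to omit the node value in the
	// success response, shrinking the reply for large values.
	resp, err := kapi.Set(context.Background(), "/big", "payload...",
		&client.SetOptions{NoValueOnSuccess: true})
	if err != nil {
		log.Fatal(err)
	}
	// ClusterID is now populated from the X-Etcd-Cluster-ID header.
	log.Printf("written via cluster %s at index %d", resp.ClusterID, resp.Index)
}
```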
30 vendor/github.com/coreos/etcd/client/util.go generated vendored
@@ -14,6 +14,20 @@

 package client

+import (
+	"regexp"
+)
+
+var (
+	roleNotFoundRegExp *regexp.Regexp
+	userNotFoundRegExp *regexp.Regexp
+)
+
+func init() {
+	roleNotFoundRegExp = regexp.MustCompile("auth: Role .* does not exist.")
+	userNotFoundRegExp = regexp.MustCompile("auth: User .* does not exist.")
+}
+
 // IsKeyNotFound returns true if the error code is ErrorCodeKeyNotFound.
 func IsKeyNotFound(err error) bool {
 	if cErr, ok := err.(Error); ok {
@@ -21,3 +35,19 @@ func IsKeyNotFound(err error) bool {
 	}
 	return false
 }
+
+// IsRoleNotFound returns true if the error means role not found of v2 API.
+func IsRoleNotFound(err error) bool {
+	if ae, ok := err.(authError); ok {
+		return roleNotFoundRegExp.MatchString(ae.Message)
+	}
+	return false
+}
+
+// IsUserNotFound returns true if the error means user not found of v2 API.
+func IsUserNotFound(err error) bool {
+	if ae, ok := err.(authError); ok {
+		return userNotFoundRegExp.MatchString(ae.Message)
+	}
+	return false
+}
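A brief sketch (not in the diff) of the new v2 auth error helpers in use; the create-if-missing flow and role name are hypothetical.

```go
package example

import (
	"github.com/coreos/etcd/client"
	"golang.org/x/net/context"
)

// ensureRole grants a role, creating it first when the server reports
// "auth: Role ... does not exist." via the new IsRoleNotFound helper.
func ensureRole(ctx context.Context, auth client.AuthRoleAPI, role string) error {
	_, err := auth.GetRole(ctx, role)
	if client.IsRoleNotFound(err) {
		return auth.AddRole(ctx, role)
	}
	return err
}
```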
4 vendor/github.com/coreos/etcd/clientv3/README.md generated vendored
@@ -72,6 +72,10 @@ if err != nil {
 }
 ```

+## Metrics
+
+The etcd client optionally exposes RPC metrics through [go-grpc-prometheus](https://github.com/grpc-ecosystem/go-grpc-prometheus). See the [examples](https://github.com/coreos/etcd/blob/master/clientv3/example_metrics_test.go).
+
 ## Examples

 More code examples can be found at [GoDoc](https://godoc.org/github.com/coreos/etcd/clientv3).
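A rough sketch (not part of the diff) of exposing those client RPC metrics. It assumes go-grpc-prometheus registers its grpc_client_* collectors with the default Prometheus registry and that promhttp is available; the addresses are placeholders.

```go
package main

import (
	"log"
	"net/http"

	"github.com/coreos/etcd/clientv3"
	"github.com/prometheus/client_golang/prometheus/promhttp"
	"golang.org/x/net/context"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// Issue a request so the gRPC client metrics have something to report.
	cli.Get(context.Background(), "foo")

	// Serve the default registry, which should now include grpc_client_* series.
	http.Handle("/metrics", promhttp.Handler())
	log.Fatal(http.ListenAndServe(":9090", nil))
}
```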
5 vendor/github.com/coreos/etcd/clientv3/auth.go generated vendored
@@ -43,6 +43,7 @@ type (
 	AuthRoleListResponse pb.AuthRoleListResponse

 	PermissionType authpb.Permission_Type
+	Permission authpb.Permission
 )

 const (
@@ -115,12 +116,12 @@ func NewAuth(c *Client) Auth {
 }

 func (auth *auth) AuthEnable(ctx context.Context) (*AuthEnableResponse, error) {
-	resp, err := auth.remote.AuthEnable(ctx, &pb.AuthEnableRequest{})
+	resp, err := auth.remote.AuthEnable(ctx, &pb.AuthEnableRequest{}, grpc.FailFast(false))
 	return (*AuthEnableResponse)(resp), toErr(ctx, err)
 }

 func (auth *auth) AuthDisable(ctx context.Context) (*AuthDisableResponse, error) {
-	resp, err := auth.remote.AuthDisable(ctx, &pb.AuthDisableRequest{})
+	resp, err := auth.remote.AuthDisable(ctx, &pb.AuthDisableRequest{}, grpc.FailFast(false))
 	return (*AuthDisableResponse)(resp), toErr(ctx, err)
 }

76 vendor/github.com/coreos/etcd/clientv3/balancer.go generated vendored
@@ -21,8 +21,14 @@ import (
|
|||||||
|
|
||||||
"golang.org/x/net/context"
|
"golang.org/x/net/context"
|
||||||
"google.golang.org/grpc"
|
"google.golang.org/grpc"
|
||||||
|
"google.golang.org/grpc/codes"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// ErrNoAddrAvilable is returned by Get() when the balancer does not have
|
||||||
|
// any active connection to endpoints at the time.
|
||||||
|
// This error is returned only when opts.BlockingWait is true.
|
||||||
|
var ErrNoAddrAvilable = grpc.Errorf(codes.Unavailable, "there is no address available")
|
||||||
|
|
||||||
// simpleBalancer does the bare minimum to expose multiple eps
|
// simpleBalancer does the bare minimum to expose multiple eps
|
||||||
// to the grpc reconnection code path
|
// to the grpc reconnection code path
|
||||||
type simpleBalancer struct {
|
type simpleBalancer struct {
|
||||||
@@ -42,6 +48,11 @@ type simpleBalancer struct {
|
|||||||
// upc closes when upEps transitions from empty to non-zero or the balancer closes.
|
// upc closes when upEps transitions from empty to non-zero or the balancer closes.
|
||||||
upc chan struct{}
|
upc chan struct{}
|
||||||
|
|
||||||
|
// grpc issues TLS cert checks using the string passed into dial so
|
||||||
|
// that string must be the host. To recover the full scheme://host URL,
|
||||||
|
// have a map from hosts to the original endpoint.
|
||||||
|
host2ep map[string]string
|
||||||
|
|
||||||
// pinAddr is the currently pinned address; set to the empty string on
|
// pinAddr is the currently pinned address; set to the empty string on
|
||||||
// intialization and shutdown.
|
// intialization and shutdown.
|
||||||
pinAddr string
|
pinAddr string
|
||||||
@@ -62,11 +73,12 @@ func newSimpleBalancer(eps []string) *simpleBalancer {
|
|||||||
readyc: make(chan struct{}),
|
readyc: make(chan struct{}),
|
||||||
upEps: make(map[string]struct{}),
|
upEps: make(map[string]struct{}),
|
||||||
upc: make(chan struct{}),
|
upc: make(chan struct{}),
|
||||||
|
host2ep: getHost2ep(eps),
|
||||||
}
|
}
|
||||||
return sb
|
return sb
|
||||||
}
|
}
|
||||||
|
|
||||||
func (b *simpleBalancer) Start(target string) error { return nil }
|
func (b *simpleBalancer) Start(target string, config grpc.BalancerConfig) error { return nil }
|
||||||
|
|
||||||
func (b *simpleBalancer) ConnectNotify() <-chan struct{} {
|
func (b *simpleBalancer) ConnectNotify() <-chan struct{} {
|
||||||
b.mu.Lock()
|
b.mu.Lock()
|
||||||
@@ -74,6 +86,49 @@ func (b *simpleBalancer) ConnectNotify() <-chan struct{} {
|
|||||||
return b.upc
|
return b.upc
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (b *simpleBalancer) getEndpoint(host string) string {
|
||||||
|
b.mu.Lock()
|
||||||
|
defer b.mu.Unlock()
|
||||||
|
return b.host2ep[host]
|
||||||
|
}
|
||||||
|
|
||||||
|
func getHost2ep(eps []string) map[string]string {
|
||||||
|
hm := make(map[string]string, len(eps))
|
||||||
|
for i := range eps {
|
||||||
|
_, host, _ := parseEndpoint(eps[i])
|
||||||
|
hm[host] = eps[i]
|
||||||
|
}
|
||||||
|
return hm
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *simpleBalancer) updateAddrs(eps []string) {
|
||||||
|
np := getHost2ep(eps)
|
||||||
|
|
||||||
|
b.mu.Lock()
|
||||||
|
defer b.mu.Unlock()
|
||||||
|
|
||||||
|
match := len(np) == len(b.host2ep)
|
||||||
|
for k, v := range np {
|
||||||
|
if b.host2ep[k] != v {
|
||||||
|
match = false
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if match {
|
||||||
|
// same endpoints, so no need to update address
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
b.host2ep = np
|
||||||
|
|
||||||
|
addrs := make([]grpc.Address, 0, len(eps))
|
||||||
|
for i := range eps {
|
||||||
|
addrs = append(addrs, grpc.Address{Addr: getHost(eps[i])})
|
||||||
|
}
|
||||||
|
b.addrs = addrs
|
||||||
|
b.notifyCh <- addrs
|
||||||
|
}
|
||||||
|
|
||||||
func (b *simpleBalancer) Up(addr grpc.Address) func(error) {
|
func (b *simpleBalancer) Up(addr grpc.Address) func(error) {
|
||||||
b.mu.Lock()
|
b.mu.Lock()
|
||||||
defer b.mu.Unlock()
|
defer b.mu.Unlock()
|
||||||
@@ -113,6 +168,25 @@ func (b *simpleBalancer) Up(addr grpc.Address) func(error) {
|
|||||||
|
|
||||||
func (b *simpleBalancer) Get(ctx context.Context, opts grpc.BalancerGetOptions) (grpc.Address, func(), error) {
|
func (b *simpleBalancer) Get(ctx context.Context, opts grpc.BalancerGetOptions) (grpc.Address, func(), error) {
|
||||||
var addr string
|
var addr string
|
||||||
|
|
||||||
|
// If opts.BlockingWait is false (for fail-fast RPCs), it should return
|
||||||
|
// an address it has notified via Notify immediately instead of blocking.
|
||||||
|
if !opts.BlockingWait {
|
||||||
|
b.mu.RLock()
|
||||||
|
closed := b.closed
|
||||||
|
addr = b.pinAddr
|
||||||
|
upEps := len(b.upEps)
|
||||||
|
b.mu.RUnlock()
|
||||||
|
if closed {
|
||||||
|
return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing
|
||||||
|
}
|
||||||
|
|
||||||
|
if upEps == 0 {
|
||||||
|
return grpc.Address{Addr: ""}, nil, ErrNoAddrAvilable
|
||||||
|
}
|
||||||
|
return grpc.Address{Addr: addr}, func() {}, nil
|
||||||
|
}
|
||||||
|
|
||||||
for {
|
for {
|
||||||
b.mu.RLock()
|
b.mu.RLock()
|
||||||
ch := b.upc
|
ch := b.upc
|
||||||
|
|||||||
198 vendor/github.com/coreos/etcd/clientv3/client.go generated vendored
@@ -18,17 +18,18 @@ import (
|
|||||||
"crypto/tls"
|
"crypto/tls"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io/ioutil"
|
|
||||||
"log"
|
|
||||||
"net"
|
"net"
|
||||||
"net/url"
|
"net/url"
|
||||||
"strings"
|
"strings"
|
||||||
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
|
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
|
||||||
|
prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
|
||||||
|
|
||||||
"golang.org/x/net/context"
|
"golang.org/x/net/context"
|
||||||
"google.golang.org/grpc"
|
"google.golang.org/grpc"
|
||||||
|
"google.golang.org/grpc/codes"
|
||||||
"google.golang.org/grpc/credentials"
|
"google.golang.org/grpc/credentials"
|
||||||
"google.golang.org/grpc/metadata"
|
"google.golang.org/grpc/metadata"
|
||||||
)
|
)
|
||||||
@@ -46,11 +47,12 @@ type Client struct {
|
|||||||
Auth
|
Auth
|
||||||
Maintenance
|
Maintenance
|
||||||
|
|
||||||
conn *grpc.ClientConn
|
conn *grpc.ClientConn
|
||||||
cfg Config
|
cfg Config
|
||||||
creds *credentials.TransportCredentials
|
creds *credentials.TransportCredentials
|
||||||
balancer *simpleBalancer
|
balancer *simpleBalancer
|
||||||
retryWrapper retryRpcFunc
|
retryWrapper retryRpcFunc
|
||||||
|
retryAuthWrapper retryRpcFunc
|
||||||
|
|
||||||
ctx context.Context
|
ctx context.Context
|
||||||
cancel context.CancelFunc
|
cancel context.CancelFunc
|
||||||
@@ -59,6 +61,8 @@ type Client struct {
|
|||||||
Username string
|
Username string
|
||||||
// Password is a password for authentication
|
// Password is a password for authentication
|
||||||
Password string
|
Password string
|
||||||
|
// tokenCred is an instance of WithPerRPCCredentials()'s argument
|
||||||
|
tokenCred *authTokenCredential
|
||||||
}
|
}
|
||||||
|
|
||||||
// New creates a new etcdv3 client from a given configuration.
|
// New creates a new etcdv3 client from a given configuration.
|
||||||
@@ -87,6 +91,8 @@ func NewFromConfigFile(path string) (*Client, error) {
|
|||||||
// Close shuts down the client's etcd connections.
|
// Close shuts down the client's etcd connections.
|
||||||
func (c *Client) Close() error {
|
func (c *Client) Close() error {
|
||||||
c.cancel()
|
c.cancel()
|
||||||
|
c.Watcher.Close()
|
||||||
|
c.Lease.Close()
|
||||||
return toErr(c.ctx, c.conn.Close())
|
return toErr(c.ctx, c.conn.Close())
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -96,10 +102,54 @@ func (c *Client) Close() error {
|
|||||||
func (c *Client) Ctx() context.Context { return c.ctx }
|
func (c *Client) Ctx() context.Context { return c.ctx }
|
||||||
|
|
||||||
// Endpoints lists the registered endpoints for the client.
|
// Endpoints lists the registered endpoints for the client.
|
||||||
func (c *Client) Endpoints() []string { return c.cfg.Endpoints }
|
func (c *Client) Endpoints() (eps []string) {
|
||||||
|
// copy the slice; protect original endpoints from being changed
|
||||||
|
eps = make([]string, len(c.cfg.Endpoints))
|
||||||
|
copy(eps, c.cfg.Endpoints)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetEndpoints updates client's endpoints.
|
||||||
|
func (c *Client) SetEndpoints(eps ...string) {
|
||||||
|
c.cfg.Endpoints = eps
|
||||||
|
c.balancer.updateAddrs(eps)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sync synchronizes client's endpoints with the known endpoints from the etcd membership.
|
||||||
|
func (c *Client) Sync(ctx context.Context) error {
|
||||||
|
mresp, err := c.MemberList(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
var eps []string
|
||||||
|
for _, m := range mresp.Members {
|
||||||
|
eps = append(eps, m.ClientURLs...)
|
||||||
|
}
|
||||||
|
c.SetEndpoints(eps...)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Client) autoSync() {
|
||||||
|
if c.cfg.AutoSyncInterval == time.Duration(0) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-c.ctx.Done():
|
||||||
|
return
|
||||||
|
case <-time.After(c.cfg.AutoSyncInterval):
|
||||||
|
ctx, _ := context.WithTimeout(c.ctx, 5*time.Second)
|
||||||
|
if err := c.Sync(ctx); err != nil && err != c.ctx.Err() {
|
||||||
|
logger.Println("Auto sync endpoints failed:", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
type authTokenCredential struct {
|
type authTokenCredential struct {
|
||||||
token string
|
token string
|
||||||
|
tokenMu *sync.RWMutex
|
||||||
}
|
}
|
||||||
|
|
||||||
func (cred authTokenCredential) RequireTransportSecurity() bool {
|
func (cred authTokenCredential) RequireTransportSecurity() bool {
|
||||||
@@ -107,24 +157,38 @@ func (cred authTokenCredential) RequireTransportSecurity() bool {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (cred authTokenCredential) GetRequestMetadata(ctx context.Context, s ...string) (map[string]string, error) {
|
func (cred authTokenCredential) GetRequestMetadata(ctx context.Context, s ...string) (map[string]string, error) {
|
||||||
|
cred.tokenMu.RLock()
|
||||||
|
defer cred.tokenMu.RUnlock()
|
||||||
return map[string]string{
|
return map[string]string{
|
||||||
"token": cred.token,
|
"token": cred.token,
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *Client) dialTarget(endpoint string) (proto string, host string, creds *credentials.TransportCredentials) {
|
func parseEndpoint(endpoint string) (proto string, host string, scheme string) {
|
||||||
proto = "tcp"
|
proto = "tcp"
|
||||||
host = endpoint
|
host = endpoint
|
||||||
creds = c.creds
|
|
||||||
url, uerr := url.Parse(endpoint)
|
url, uerr := url.Parse(endpoint)
|
||||||
if uerr != nil || !strings.Contains(endpoint, "://") {
|
if uerr != nil || !strings.Contains(endpoint, "://") {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
scheme = url.Scheme
|
||||||
|
|
||||||
// strip scheme:// prefix since grpc dials by host
|
// strip scheme:// prefix since grpc dials by host
|
||||||
host = url.Host
|
host = url.Host
|
||||||
switch url.Scheme {
|
switch url.Scheme {
|
||||||
|
case "http", "https":
|
||||||
case "unix":
|
case "unix":
|
||||||
proto = "unix"
|
proto = "unix"
|
||||||
|
default:
|
||||||
|
proto, host = "", ""
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Client) processCreds(scheme string) (creds *credentials.TransportCredentials) {
|
||||||
|
creds = c.creds
|
||||||
|
switch scheme {
|
||||||
|
case "unix":
|
||||||
case "http":
|
case "http":
|
||||||
creds = nil
|
creds = nil
|
||||||
case "https":
|
case "https":
|
||||||
@@ -135,7 +199,7 @@ func (c *Client) dialTarget(endpoint string) (proto string, host string, creds *
|
|||||||
emptyCreds := credentials.NewTLS(tlsconfig)
|
emptyCreds := credentials.NewTLS(tlsconfig)
|
||||||
creds = &emptyCreds
|
creds = &emptyCreds
|
||||||
default:
|
default:
|
||||||
return "", "", nil
|
creds = nil
|
||||||
}
|
}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@@ -147,17 +211,8 @@ func (c *Client) dialSetupOpts(endpoint string, dopts ...grpc.DialOption) (opts
|
|||||||
}
|
}
|
||||||
opts = append(opts, dopts...)
|
opts = append(opts, dopts...)
|
||||||
|
|
||||||
// grpc issues TLS cert checks using the string passed into dial so
|
|
||||||
// that string must be the host. To recover the full scheme://host URL,
|
|
||||||
// have a map from hosts to the original endpoint.
|
|
||||||
host2ep := make(map[string]string)
|
|
||||||
for i := range c.cfg.Endpoints {
|
|
||||||
_, host, _ := c.dialTarget(c.cfg.Endpoints[i])
|
|
||||||
host2ep[host] = c.cfg.Endpoints[i]
|
|
||||||
}
|
|
||||||
|
|
||||||
f := func(host string, t time.Duration) (net.Conn, error) {
|
f := func(host string, t time.Duration) (net.Conn, error) {
|
||||||
proto, host, _ := c.dialTarget(host2ep[host])
|
proto, host, _ := parseEndpoint(c.balancer.getEndpoint(host))
|
||||||
if proto == "" {
|
if proto == "" {
|
||||||
return nil, fmt.Errorf("unknown scheme for %q", host)
|
return nil, fmt.Errorf("unknown scheme for %q", host)
|
||||||
}
|
}
|
||||||
@@ -166,11 +221,15 @@ func (c *Client) dialSetupOpts(endpoint string, dopts ...grpc.DialOption) (opts
|
|||||||
return nil, c.ctx.Err()
|
return nil, c.ctx.Err()
|
||||||
default:
|
default:
|
||||||
}
|
}
|
||||||
return net.DialTimeout(proto, host, t)
|
dialer := &net.Dialer{Timeout: t}
|
||||||
|
return dialer.DialContext(c.ctx, proto, host)
|
||||||
}
|
}
|
||||||
opts = append(opts, grpc.WithDialer(f))
|
opts = append(opts, grpc.WithDialer(f))
|
||||||
|
|
||||||
_, _, creds := c.dialTarget(endpoint)
|
creds := c.creds
|
||||||
|
if _, _, scheme := parseEndpoint(endpoint); len(scheme) != 0 {
|
||||||
|
creds = c.processCreds(scheme)
|
||||||
|
}
|
||||||
if creds != nil {
|
if creds != nil {
|
||||||
opts = append(opts, grpc.WithTransportCredentials(*creds))
|
opts = append(opts, grpc.WithTransportCredentials(*creds))
|
||||||
} else {
|
} else {
|
||||||
@@ -185,24 +244,56 @@ func (c *Client) Dial(endpoint string) (*grpc.ClientConn, error) {
|
|||||||
return c.dial(endpoint)
|
return c.dial(endpoint)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (c *Client) getToken(ctx context.Context) error {
|
||||||
|
var err error // return last error in a case of fail
|
||||||
|
var auth *authenticator
|
||||||
|
|
||||||
|
for i := 0; i < len(c.cfg.Endpoints); i++ {
|
||||||
|
endpoint := c.cfg.Endpoints[i]
|
||||||
|
host := getHost(endpoint)
|
||||||
|
// use dial options without dopts to avoid reusing the client balancer
|
||||||
|
auth, err = newAuthenticator(host, c.dialSetupOpts(endpoint))
|
||||||
|
if err != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
defer auth.close()
|
||||||
|
|
||||||
|
var resp *AuthenticateResponse
|
||||||
|
resp, err = auth.authenticate(ctx, c.Username, c.Password)
|
||||||
|
if err != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
c.tokenCred.tokenMu.Lock()
|
||||||
|
c.tokenCred.token = resp.Token
|
||||||
|
c.tokenCred.tokenMu.Unlock()
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
func (c *Client) dial(endpoint string, dopts ...grpc.DialOption) (*grpc.ClientConn, error) {
|
func (c *Client) dial(endpoint string, dopts ...grpc.DialOption) (*grpc.ClientConn, error) {
|
||||||
opts := c.dialSetupOpts(endpoint, dopts...)
|
opts := c.dialSetupOpts(endpoint, dopts...)
|
||||||
host := getHost(endpoint)
|
host := getHost(endpoint)
|
||||||
if c.Username != "" && c.Password != "" {
|
if c.Username != "" && c.Password != "" {
|
||||||
// use dial options without dopts to avoid reusing the client balancer
|
c.tokenCred = &authTokenCredential{
|
||||||
auth, err := newAuthenticator(host, c.dialSetupOpts(endpoint))
|
tokenMu: &sync.RWMutex{},
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
}
|
||||||
defer auth.close()
|
|
||||||
|
|
||||||
resp, err := auth.authenticate(c.ctx, c.Username, c.Password)
|
err := c.getToken(context.TODO())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
opts = append(opts, grpc.WithPerRPCCredentials(authTokenCredential{token: resp.Token}))
|
|
||||||
|
opts = append(opts, grpc.WithPerRPCCredentials(c.tokenCred))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// add metrics options
|
||||||
|
opts = append(opts, grpc.WithUnaryInterceptor(prometheus.UnaryClientInterceptor))
|
||||||
|
opts = append(opts, grpc.WithStreamInterceptor(prometheus.StreamClientInterceptor))
|
||||||
|
|
||||||
conn, err := grpc.Dial(host, opts...)
|
conn, err := grpc.Dial(host, opts...)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@@ -248,6 +339,7 @@ func newClient(cfg *Config) (*Client, error) {
|
|||||||
}
|
}
|
||||||
client.conn = conn
|
client.conn = conn
|
||||||
client.retryWrapper = client.newRetryWrapper()
|
client.retryWrapper = client.newRetryWrapper()
|
||||||
|
client.retryAuthWrapper = client.newAuthRetryWrapper()
|
||||||
|
|
||||||
// wait for a connection
|
// wait for a connection
|
||||||
if cfg.DialTimeout > 0 {
|
if cfg.DialTimeout > 0 {
|
||||||
@@ -272,13 +364,8 @@ func newClient(cfg *Config) (*Client, error) {
|
|||||||
client.Watcher = NewWatcher(client)
|
client.Watcher = NewWatcher(client)
|
||||||
client.Auth = NewAuth(client)
|
client.Auth = NewAuth(client)
|
||||||
client.Maintenance = NewMaintenance(client)
|
client.Maintenance = NewMaintenance(client)
|
||||||
if cfg.Logger != nil {
|
|
||||||
logger.Set(cfg.Logger)
|
|
||||||
} else {
|
|
||||||
// disable client side grpc by default
|
|
||||||
logger.Set(log.New(ioutil.Discard, "", 0))
|
|
||||||
}
|
|
||||||
|
|
||||||
|
go client.autoSync()
|
||||||
return client, nil
|
return client, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -294,17 +381,14 @@ func isHaltErr(ctx context.Context, err error) bool {
|
|||||||
if err == nil {
|
if err == nil {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
eErr := rpctypes.Error(err)
|
code := grpc.Code(err)
|
||||||
if _, ok := eErr.(rpctypes.EtcdError); ok {
|
// Unavailable codes mean the system will be right back.
|
||||||
return eErr != rpctypes.ErrStopped && eErr != rpctypes.ErrNoLeader
|
// (e.g., can't connect, lost leader)
|
||||||
}
|
// Treat Internal codes as if something failed, leaving the
|
||||||
// treat etcdserver errors not recognized by the client as halting
|
// system in an inconsistent state, but retrying could make progress.
|
||||||
return isConnClosing(err) || strings.Contains(err.Error(), "etcdserver:")
|
// (e.g., failed in middle of send, corrupted frame)
|
||||||
}
|
// TODO: are permanent Internal errors possible from grpc?
|
||||||
|
return code != codes.Unavailable && code != codes.Internal
|
||||||
// isConnClosing returns true if the error matches a grpc client closing error
|
|
||||||
func isConnClosing(err error) bool {
|
|
||||||
return strings.Contains(err.Error(), grpc.ErrClientConnClosing.Error())
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func toErr(ctx context.Context, err error) error {
|
func toErr(ctx context.Context, err error) error {
|
||||||
@@ -312,12 +396,20 @@ func toErr(ctx context.Context, err error) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
err = rpctypes.Error(err)
|
err = rpctypes.Error(err)
|
||||||
switch {
|
if _, ok := err.(rpctypes.EtcdError); ok {
|
||||||
case ctx.Err() != nil && strings.Contains(err.Error(), "context"):
|
return err
|
||||||
err = ctx.Err()
|
}
|
||||||
case strings.Contains(err.Error(), ErrNoAvailableEndpoints.Error()):
|
code := grpc.Code(err)
|
||||||
|
switch code {
|
||||||
|
case codes.DeadlineExceeded:
|
||||||
|
fallthrough
|
||||||
|
case codes.Canceled:
|
||||||
|
if ctx.Err() != nil {
|
||||||
|
err = ctx.Err()
|
||||||
|
}
|
||||||
|
case codes.Unavailable:
|
||||||
err = ErrNoAvailableEndpoints
|
err = ErrNoAvailableEndpoints
|
||||||
case strings.Contains(err.Error(), grpc.ErrClientConnClosing.Error()):
|
case codes.FailedPrecondition:
|
||||||
err = grpc.ErrClientConnClosing
|
err = grpc.ErrClientConnClosing
|
||||||
}
|
}
|
||||||
return err
|
return err
|
||||||
|
|||||||
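Usage sketch (not part of the diff) for the endpoint-management APIs added to the clientv3 client above (Endpoints copy, SetEndpoints, Sync); the endpoint URLs are placeholders.

```go
package main

import (
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"http://10.0.0.1:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// Replace the endpoint list at runtime; the balancer is updated in place.
	cli.SetEndpoints("http://10.0.0.2:2379", "http://10.0.0.3:2379")

	// Or pull the current member list from the cluster itself.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	if err := cli.Sync(ctx); err != nil {
		log.Fatal(err)
	}
	log.Println("endpoints:", cli.Endpoints())
}
```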
2 vendor/github.com/coreos/etcd/clientv3/cluster.go generated vendored
@@ -78,7 +78,7 @@ func (c *cluster) MemberUpdate(ctx context.Context, id uint64, peerAddrs []strin
 	// it is safe to retry on update.
 	for {
 		r := &pb.MemberUpdateRequest{ID: id, PeerURLs: peerAddrs}
-		resp, err := c.remote.MemberUpdate(ctx, r)
+		resp, err := c.remote.MemberUpdate(ctx, r, grpc.FailFast(false))
 		if err == nil {
 			return (*MemberUpdateResponse)(resp), nil
 		}
2 vendor/github.com/coreos/etcd/clientv3/compare.go generated vendored
@@ -36,6 +36,8 @@ func Compare(cmp Cmp, result string, v interface{}) Cmp {
 	switch result {
 	case "=":
 		r = pb.Compare_EQUAL
+	case "!=":
+		r = pb.Compare_NOT_EQUAL
 	case ">":
 		r = pb.Compare_GREATER
 	case "<":
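Illustrative only (not in the diff): the new "!=" result lets a transaction guard on a value being different from an expected one; key and value are placeholders.

```go
package example

import (
	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

// putIfChanged writes val only when the stored value differs from it,
// using the "!=" comparison added above.
func putIfChanged(cli *clientv3.Client, key, val string) error {
	_, err := cli.Txn(context.TODO()).
		If(clientv3.Compare(clientv3.Value(key), "!=", val)).
		Then(clientv3.OpPut(key, val)).
		Commit()
	return err
}
```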
13 vendor/github.com/coreos/etcd/clientv3/config.go generated vendored
@@ -28,15 +28,16 @@ type Config struct {
 	// Endpoints is a list of URLs
 	Endpoints []string

+	// AutoSyncInterval is the interval to update endpoints with its latest members.
+	// 0 disables auto-sync. By default auto-sync is disabled.
+	AutoSyncInterval time.Duration
+
 	// DialTimeout is the timeout for failing to establish a connection.
 	DialTimeout time.Duration

 	// TLS holds the client secure credentials, if any.
 	TLS *tls.Config

-	// Logger is the logger used by client library.
-	Logger Logger
-
 	// Username is a username for authentication
 	Username string

@@ -46,6 +47,7 @@ type Config struct {

 type yamlConfig struct {
 	Endpoints []string `json:"endpoints"`
+	AutoSyncInterval time.Duration `json:"auto-sync-interval"`
 	DialTimeout time.Duration `json:"dial-timeout"`
 	InsecureTransport bool `json:"insecure-transport"`
 	InsecureSkipTLSVerify bool `json:"insecure-skip-tls-verify"`
@@ -68,8 +70,9 @@ func configFromFile(fpath string) (*Config, error) {
 	}

 	cfg := &Config{
 		Endpoints: yc.Endpoints,
-		DialTimeout: yc.DialTimeout,
+		AutoSyncInterval: yc.AutoSyncInterval,
+		DialTimeout: yc.DialTimeout,
 	}

 	if yc.InsecureTransport {
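A short sketch (not in the diff) of the new AutoSyncInterval field; endpoint and interval values are placeholders.

```go
package example

import (
	"time"

	"github.com/coreos/etcd/clientv3"
)

// newAutoSyncClient builds a client that refreshes its endpoint list from
// the cluster every five minutes (0, the default, leaves auto-sync disabled).
func newAutoSyncClient() (*clientv3.Client, error) {
	return clientv3.New(clientv3.Config{
		Endpoints:        []string{"http://127.0.0.1:2379"},
		AutoSyncInterval: 5 * time.Minute,
		DialTimeout:      5 * time.Second,
	})
}
```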
2 vendor/github.com/coreos/etcd/clientv3/doc.go generated vendored
@@ -44,7 +44,7 @@
 // etcd client returns 2 types of errors:
 //
 // 1. context error: canceled or deadline exceeded.
-// 2. gRPC error: see https://github.com/coreos/etcd/blob/master/etcdserver/api/v3rpc/error.go.
+// 2. gRPC error: see https://github.com/coreos/etcd/blob/master/etcdserver/api/v3rpc/rpctypes/error.go
 //
 // Here is the example code to handle client errors:
 //
23 vendor/github.com/coreos/etcd/clientv3/kv.go generated vendored
@@ -85,6 +85,10 @@ func NewKV(c *Client) KV {
 	return &kv{remote: RetryKVClient(c)}
 }

+func NewKVFromKVClient(remote pb.KVClient) KV {
+	return &kv{remote: remote}
+}
+
 func (kv *kv) Put(ctx context.Context, key, val string, opts ...OpOption) (*PutResponse, error) {
 	r, err := kv.Do(ctx, OpPut(key, val, opts...))
 	return r.put, toErr(ctx, err)
@@ -101,7 +105,7 @@ func (kv *kv) Delete(ctx context.Context, key string, opts ...OpOption) (*Delete
 }

 func (kv *kv) Compact(ctx context.Context, rev int64, opts ...CompactOption) (*CompactResponse, error) {
-	resp, err := kv.remote.Compact(ctx, OpCompact(rev, opts...).toRequest(), grpc.FailFast(false))
+	resp, err := kv.remote.Compact(ctx, OpCompact(rev, opts...).toRequest())
 	if err != nil {
 		return nil, toErr(ctx, err)
 	}
@@ -121,6 +125,7 @@ func (kv *kv) Do(ctx context.Context, op Op) (OpResponse, error) {
 	if err == nil {
 		return resp, nil
 	}
+
 	if isHaltErr(ctx, err) {
 		return resp, toErr(ctx, err)
 	}
@@ -137,21 +142,7 @@ func (kv *kv) do(ctx context.Context, op Op) (OpResponse, error) {
 	// TODO: handle other ops
 	case tRange:
 		var resp *pb.RangeResponse
-		r := &pb.RangeRequest{
-			Key: op.key,
-			RangeEnd: op.end,
-			Limit: op.limit,
-			Revision: op.rev,
-			Serializable: op.serializable,
-			KeysOnly: op.keysOnly,
-			CountOnly: op.countOnly,
-		}
-		if op.sort != nil {
-			r.SortOrder = pb.RangeRequest_SortOrder(op.sort.Order)
-			r.SortTarget = pb.RangeRequest_SortTarget(op.sort.Target)
-		}
-
-		resp, err = kv.remote.Range(ctx, r, grpc.FailFast(false))
+		resp, err = kv.remote.Range(ctx, op.toRangeRequest(), grpc.FailFast(false))
 		if err == nil {
 			return OpResponse{get: (*GetResponse)(resp)}, nil
 		}
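A hedged sketch (not in the diff) of the new NewKVFromKVClient constructor, which wraps an existing gRPC connection instead of a *clientv3.Client; the target address and dial options are placeholders, and the generated pb.NewKVClient is assumed to be importable from etcdserverpb.

```go
package example

import (
	"github.com/coreos/etcd/clientv3"
	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
	"google.golang.org/grpc"
)

// kvFromConn builds a KV facade on top of a raw gRPC connection.
func kvFromConn() (clientv3.KV, error) {
	conn, err := grpc.Dial("127.0.0.1:2379", grpc.WithInsecure())
	if err != nil {
		return nil, err
	}
	return clientv3.NewKVFromKVClient(pb.NewKVClient(conn)), nil
}
```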
83 vendor/github.com/coreos/etcd/clientv3/lease.go generated vendored
@@ -44,6 +44,21 @@ type LeaseKeepAliveResponse struct {
|
|||||||
TTL int64
|
TTL int64
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// LeaseTimeToLiveResponse is used to convert the protobuf lease timetolive response.
|
||||||
|
type LeaseTimeToLiveResponse struct {
|
||||||
|
*pb.ResponseHeader
|
||||||
|
ID LeaseID `json:"id"`
|
||||||
|
|
||||||
|
// TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds.
|
||||||
|
TTL int64 `json:"ttl"`
|
||||||
|
|
||||||
|
// GrantedTTL is the initial granted time in seconds upon lease creation/renewal.
|
||||||
|
GrantedTTL int64 `json:"granted-ttl"`
|
||||||
|
|
||||||
|
// Keys is the list of keys attached to this lease.
|
||||||
|
Keys [][]byte `json:"keys"`
|
||||||
|
}
|
||||||
|
|
||||||
const (
|
const (
|
||||||
// defaultTTL is the assumed lease TTL used for the first keepalive
|
// defaultTTL is the assumed lease TTL used for the first keepalive
|
||||||
// deadline before the actual TTL is known to the client.
|
// deadline before the actual TTL is known to the client.
|
||||||
@@ -54,6 +69,21 @@ const (
|
|||||||
NoLease LeaseID = 0
|
NoLease LeaseID = 0
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// ErrKeepAliveHalted is returned if client keep alive loop halts with an unexpected error.
|
||||||
|
//
|
||||||
|
// This usually means that automatic lease renewal via KeepAlive is broken, but KeepAliveOnce will still work as expected.
|
||||||
|
type ErrKeepAliveHalted struct {
|
||||||
|
Reason error
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e ErrKeepAliveHalted) Error() string {
|
||||||
|
s := "etcdclient: leases keep alive halted"
|
||||||
|
if e.Reason != nil {
|
||||||
|
s += ": " + e.Reason.Error()
|
||||||
|
}
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
type Lease interface {
|
type Lease interface {
|
||||||
// Grant creates a new lease.
|
// Grant creates a new lease.
|
||||||
Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, error)
|
Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, error)
|
||||||
@@ -61,6 +91,9 @@ type Lease interface {
|
|||||||
// Revoke revokes the given lease.
|
// Revoke revokes the given lease.
|
||||||
Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, error)
|
Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, error)
|
||||||
|
|
||||||
|
// TimeToLive retrieves the lease information of the given lease ID.
|
||||||
|
TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error)
|
||||||
|
|
||||||
// KeepAlive keeps the given lease alive forever.
|
// KeepAlive keeps the given lease alive forever.
|
||||||
KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error)
|
KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error)
|
||||||
|
|
||||||
@@ -76,8 +109,9 @@ type Lease interface {
|
|||||||
type lessor struct {
|
type lessor struct {
|
||||||
mu sync.Mutex // guards all fields
|
mu sync.Mutex // guards all fields
|
||||||
|
|
||||||
// donec is closed when recvKeepAliveLoop stops
|
// donec is closed and loopErr is set when recvKeepAliveLoop stops
|
||||||
donec chan struct{}
|
donec chan struct{}
|
||||||
|
loopErr error
|
||||||
|
|
||||||
remote pb.LeaseClient
|
remote pb.LeaseClient
|
||||||
|
|
||||||
@@ -141,7 +175,7 @@ func (l *lessor) Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, err
|
|||||||
return gresp, nil
|
return gresp, nil
|
||||||
}
|
}
|
||||||
if isHaltErr(cctx, err) {
|
if isHaltErr(cctx, err) {
|
||||||
return nil, toErr(ctx, err)
|
return nil, toErr(cctx, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -164,10 +198,43 @@ func (l *lessor) Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse,
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (l *lessor) TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error) {
|
||||||
|
cctx, cancel := context.WithCancel(ctx)
|
||||||
|
done := cancelWhenStop(cancel, l.stopCtx.Done())
|
||||||
|
defer close(done)
|
||||||
|
|
||||||
|
for {
|
||||||
|
r := toLeaseTimeToLiveRequest(id, opts...)
|
||||||
|
resp, err := l.remote.LeaseTimeToLive(cctx, r, grpc.FailFast(false))
|
||||||
|
if err == nil {
|
||||||
|
gresp := &LeaseTimeToLiveResponse{
|
||||||
|
ResponseHeader: resp.GetHeader(),
|
||||||
|
ID: LeaseID(resp.ID),
|
||||||
|
TTL: resp.TTL,
|
||||||
|
GrantedTTL: resp.GrantedTTL,
|
||||||
|
Keys: resp.Keys,
|
||||||
|
}
|
||||||
|
return gresp, nil
|
||||||
|
}
|
||||||
|
if isHaltErr(cctx, err) {
|
||||||
|
return nil, toErr(cctx, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func (l *lessor) KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error) {
|
func (l *lessor) KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error) {
|
||||||
ch := make(chan *LeaseKeepAliveResponse, leaseResponseChSize)
|
ch := make(chan *LeaseKeepAliveResponse, leaseResponseChSize)
|
||||||
|
|
||||||
l.mu.Lock()
|
l.mu.Lock()
|
||||||
|
// ensure that recvKeepAliveLoop is still running
|
||||||
|
select {
|
||||||
|
case <-l.donec:
|
||||||
|
err := l.loopErr
|
||||||
|
l.mu.Unlock()
|
||||||
|
close(ch)
|
||||||
|
return ch, ErrKeepAliveHalted{Reason: err}
|
||||||
|
default:
|
||||||
|
}
|
||||||
ka, ok := l.keepAlives[id]
|
ka, ok := l.keepAlives[id]
|
||||||
if !ok {
|
if !ok {
|
||||||
// create fresh keep alive
|
// create fresh keep alive
|
||||||
@@ -275,10 +342,11 @@ func (l *lessor) keepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAlive
|
|||||||
return karesp, nil
|
return karesp, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (l *lessor) recvKeepAliveLoop() {
|
func (l *lessor) recvKeepAliveLoop() (gerr error) {
|
||||||
defer func() {
|
defer func() {
|
||||||
l.mu.Lock()
|
l.mu.Lock()
|
||||||
close(l.donec)
|
close(l.donec)
|
||||||
|
l.loopErr = gerr
|
||||||
for _, ka := range l.keepAlives {
|
for _, ka := range l.keepAlives {
|
||||||
ka.Close()
|
ka.Close()
|
||||||
}
|
}
|
||||||
@@ -291,13 +359,14 @@ func (l *lessor) recvKeepAliveLoop() {
|
|||||||
resp, err := stream.Recv()
|
resp, err := stream.Recv()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if isHaltErr(l.stopCtx, err) {
|
if isHaltErr(l.stopCtx, err) {
|
||||||
return
|
return err
|
||||||
}
|
}
|
||||||
stream, serr = l.resetRecv()
|
stream, serr = l.resetRecv()
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
l.recvKeepAlive(resp)
|
l.recvKeepAlive(resp)
|
||||||
}
|
}
|
||||||
|
return serr
|
||||||
}
|
}
|
||||||
|
|
||||||
// resetRecv opens a new lease stream and starts sending LeaseKeepAliveRequests
|
// resetRecv opens a new lease stream and starts sending LeaseKeepAliveRequests
|
||||||
@@ -347,7 +416,7 @@ func (l *lessor) recvKeepAlive(resp *pb.LeaseKeepAliveResponse) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// send update to all channels
|
// send update to all channels
|
||||||
nextKeepAlive := time.Now().Add(1 + time.Duration(karesp.TTL/3)*time.Second)
|
nextKeepAlive := time.Now().Add((time.Duration(karesp.TTL) * time.Second) / 3.0)
|
||||||
ka.deadline = time.Now().Add(time.Duration(karesp.TTL) * time.Second)
|
ka.deadline = time.Now().Add(time.Duration(karesp.TTL) * time.Second)
|
||||||
for _, ch := range ka.chs {
|
for _, ch := range ka.chs {
|
||||||
select {
|
select {
|
||||||
@@ -393,7 +462,7 @@ func (l *lessor) sendKeepAliveLoop(stream pb.Lease_LeaseKeepAliveClient) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
tosend := make([]LeaseID, 0)
|
var tosend []LeaseID
|
||||||
|
|
||||||
now := time.Now()
|
now := time.Now()
|
||||||
l.mu.Lock()
|
l.mu.Lock()
|
||||||
|
|||||||
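A sketch (not part of the diff) of the lease additions above: the TimeToLive call with its LeaseTimeToLiveResponse and the typed ErrKeepAliveHalted error. The WithAttachedKeys option name is taken from the 3.1 client and is an assumption here; the lease ID is a placeholder.

```go
package example

import (
	"log"

	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

func inspectLease(cli *clientv3.Client, id clientv3.LeaseID) {
	// WithAttachedKeys also returns the keys bound to the lease.
	resp, err := cli.TimeToLive(context.TODO(), id, clientv3.WithAttachedKeys())
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("lease %x: ttl=%ds granted=%ds keys=%d",
		int64(resp.ID), resp.TTL, resp.GrantedTTL, len(resp.Keys))

	// KeepAlive now reports a halted renewal loop as ErrKeepAliveHalted.
	ch, err := cli.KeepAlive(context.Background(), id)
	if kerr, ok := err.(clientv3.ErrKeepAliveHalted); ok {
		log.Printf("keepalive loop halted: %v", kerr.Reason)
		return
	}
	_ = ch
}
```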
40 vendor/github.com/coreos/etcd/clientv3/logger.go generated vendored
@@ -15,13 +15,15 @@
 package clientv3

 import (
+	"io/ioutil"
 	"log"
-	"os"
 	"sync"

 	"google.golang.org/grpc/grpclog"
 )

+// Logger is the logger used by client library.
+// It implements grpclog.Logger interface.
 type Logger grpclog.Logger

 var (
@@ -34,20 +36,36 @@ type settableLogger struct {
 }

 func init() {
-	// use go's standard logger by default like grpc
+	// disable client side logs by default
 	logger.mu.Lock()
-	logger.l = log.New(os.Stderr, "", log.LstdFlags)
+	logger.l = log.New(ioutil.Discard, "", 0)
+
+	// logger has to override the grpclog at initialization so that
+	// any changes to the grpclog go through logger with locking
+	// instead of through SetLogger
+	//
+	// now updates only happen through settableLogger.set
 	grpclog.SetLogger(&logger)
 	logger.mu.Unlock()
 }

-func (s *settableLogger) Set(l Logger) {
+// SetLogger sets client-side Logger. By default, logs are disabled.
+func SetLogger(l Logger) {
+	logger.set(l)
+}
+
+// GetLogger returns the current logger.
+func GetLogger() Logger {
+	return logger.get()
+}
+
+func (s *settableLogger) set(l Logger) {
 	s.mu.Lock()
 	logger.l = l
 	s.mu.Unlock()
 }

-func (s *settableLogger) Get() Logger {
+func (s *settableLogger) get() Logger {
 	s.mu.RLock()
 	l := logger.l
 	s.mu.RUnlock()
@@ -56,9 +74,9 @@ func (s *settableLogger) Get() Logger {

 // implement the grpclog.Logger interface

-func (s *settableLogger) Fatal(args ...interface{}) { s.Get().Fatal(args...) }
-func (s *settableLogger) Fatalf(format string, args ...interface{}) { s.Get().Fatalf(format, args...) }
-func (s *settableLogger) Fatalln(args ...interface{}) { s.Get().Fatalln(args...) }
-func (s *settableLogger) Print(args ...interface{}) { s.Get().Print(args...) }
-func (s *settableLogger) Printf(format string, args ...interface{}) { s.Get().Printf(format, args...) }
-func (s *settableLogger) Println(args ...interface{}) { s.Get().Println(args...) }
+func (s *settableLogger) Fatal(args ...interface{}) { s.get().Fatal(args...) }
+func (s *settableLogger) Fatalf(format string, args ...interface{}) { s.get().Fatalf(format, args...) }
+func (s *settableLogger) Fatalln(args ...interface{}) { s.get().Fatalln(args...) }
+func (s *settableLogger) Print(args ...interface{}) { s.get().Print(args...) }
+func (s *settableLogger) Printf(format string, args ...interface{}) { s.get().Printf(format, args...) }
+func (s *settableLogger) Println(args ...interface{}) { s.get().Println(args...) }
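Illustrative only (not in the diff): client-side logs are now discarded by default, and the new package-level SetLogger turns them back on; *log.Logger satisfies the grpclog-based Logger interface.

```go
package example

import (
	"log"
	"os"

	"github.com/coreos/etcd/clientv3"
)

// enableClientLogs re-enables clientv3 logging to stderr.
func enableClientLogs() {
	clientv3.SetLogger(log.New(os.Stderr, "etcd-client: ", log.LstdFlags))
}
```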
145 vendor/github.com/coreos/etcd/clientv3/op.go generated vendored
@@ -14,9 +14,7 @@
|
|||||||
|
|
||||||
package clientv3
|
package clientv3
|
||||||
|
|
||||||
import (
|
import pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
|
||||||
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
|
|
||||||
)
|
|
||||||
|
|
||||||
type opType int
|
type opType int
|
||||||
|
|
||||||
@@ -43,6 +41,10 @@ type Op struct {
|
|||||||
serializable bool
|
serializable bool
|
||||||
keysOnly bool
|
keysOnly bool
|
||||||
countOnly bool
|
countOnly bool
|
||||||
|
minModRev int64
|
||||||
|
maxModRev int64
|
||||||
|
minCreateRev int64
|
||||||
|
maxCreateRev int64
|
||||||
|
|
||||||
// for range, watch
|
// for range, watch
|
||||||
rev int64
|
rev int64
|
||||||
@@ -52,29 +54,45 @@ type Op struct {
|
|||||||
|
|
||||||
// progressNotify is for progress updates.
|
// progressNotify is for progress updates.
|
||||||
progressNotify bool
|
progressNotify bool
|
||||||
|
// createdNotify is for created event
|
||||||
|
createdNotify bool
|
||||||
|
// filters for watchers
|
||||||
|
filterPut bool
|
||||||
|
filterDelete bool
|
||||||
|
|
||||||
// for put
|
// for put
|
||||||
val []byte
|
val []byte
|
||||||
leaseID LeaseID
|
leaseID LeaseID
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (op Op) toRangeRequest() *pb.RangeRequest {
|
||||||
|
if op.t != tRange {
|
||||||
|
panic("op.t != tRange")
|
||||||
|
}
|
||||||
|
r := &pb.RangeRequest{
|
||||||
|
Key: op.key,
|
||||||
|
RangeEnd: op.end,
|
||||||
|
Limit: op.limit,
|
||||||
|
Revision: op.rev,
|
||||||
|
Serializable: op.serializable,
|
||||||
|
KeysOnly: op.keysOnly,
|
||||||
|
CountOnly: op.countOnly,
|
||||||
|
MinModRevision: op.minModRev,
|
||||||
|
MaxModRevision: op.maxModRev,
|
||||||
|
MinCreateRevision: op.minCreateRev,
|
||||||
|
MaxCreateRevision: op.maxCreateRev,
|
||||||
|
}
|
||||||
|
if op.sort != nil {
|
||||||
|
r.SortOrder = pb.RangeRequest_SortOrder(op.sort.Order)
|
||||||
|
r.SortTarget = pb.RangeRequest_SortTarget(op.sort.Target)
|
||||||
|
}
|
||||||
|
return r
|
||||||
|
}
|
||||||
|
|
||||||
func (op Op) toRequestOp() *pb.RequestOp {
|
func (op Op) toRequestOp() *pb.RequestOp {
|
||||||
switch op.t {
|
switch op.t {
|
||||||
case tRange:
|
case tRange:
|
||||||
r := &pb.RangeRequest{
|
return &pb.RequestOp{Request: &pb.RequestOp_RequestRange{RequestRange: op.toRangeRequest()}}
|
||||||
Key: op.key,
|
|
||||||
RangeEnd: op.end,
|
|
||||||
Limit: op.limit,
|
|
||||||
Revision: op.rev,
|
|
||||||
Serializable: op.serializable,
|
|
||||||
KeysOnly: op.keysOnly,
|
|
||||||
CountOnly: op.countOnly,
|
|
||||||
}
|
|
||||||
if op.sort != nil {
|
|
||||||
r.SortOrder = pb.RangeRequest_SortOrder(op.sort.Order)
|
|
||||||
r.SortTarget = pb.RangeRequest_SortTarget(op.sort.Target)
|
|
||||||
}
|
|
||||||
return &pb.RequestOp{Request: &pb.RequestOp_RequestRange{RequestRange: r}}
|
|
||||||
case tPut:
|
case tPut:
|
||||||
r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID), PrevKv: op.prevKV}
|
r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID), PrevKv: op.prevKV}
|
||||||
return &pb.RequestOp{Request: &pb.RequestOp_RequestPut{RequestPut: r}}
|
return &pb.RequestOp{Request: &pb.RequestOp_RequestPut{RequestPut: r}}
|
||||||
@@ -112,6 +130,14 @@ func OpDelete(key string, opts ...OpOption) Op {
|
|||||||
panic("unexpected serializable in delete")
|
panic("unexpected serializable in delete")
|
||||||
case ret.countOnly:
|
case ret.countOnly:
|
||||||
panic("unexpected countOnly in delete")
|
panic("unexpected countOnly in delete")
|
||||||
|
case ret.minModRev != 0, ret.maxModRev != 0:
|
||||||
|
panic("unexpected mod revision filter in delete")
|
||||||
|
case ret.minCreateRev != 0, ret.maxCreateRev != 0:
|
||||||
|
panic("unexpected create revision filter in delete")
|
||||||
|
case ret.filterDelete, ret.filterPut:
|
||||||
|
panic("unexpected filter in delete")
|
||||||
|
case ret.createdNotify:
|
||||||
|
panic("unexpected createdNotify in delete")
|
||||||
}
|
}
|
||||||
return ret
|
return ret
|
||||||
}
|
}
|
||||||
@@ -131,7 +157,15 @@ func OpPut(key, val string, opts ...OpOption) Op {
|
|||||||
case ret.serializable:
|
case ret.serializable:
|
||||||
panic("unexpected serializable in put")
|
panic("unexpected serializable in put")
|
||||||
case ret.countOnly:
|
case ret.countOnly:
|
||||||
panic("unexpected countOnly in delete")
|
panic("unexpected countOnly in put")
|
||||||
|
case ret.minModRev != 0, ret.maxModRev != 0:
|
||||||
|
panic("unexpected mod revision filter in put")
|
||||||
|
case ret.minCreateRev != 0, ret.maxCreateRev != 0:
|
||||||
|
panic("unexpected create revision filter in put")
|
||||||
|
case ret.filterDelete, ret.filterPut:
|
||||||
|
panic("unexpected filter in put")
|
||||||
|
case ret.createdNotify:
|
||||||
|
panic("unexpected createdNotify in put")
|
||||||
}
|
}
|
||||||
return ret
|
return ret
|
||||||
}
|
}
|
||||||
@@ -149,7 +183,11 @@ func opWatch(key string, opts ...OpOption) Op {
|
|||||||
case ret.serializable:
|
case ret.serializable:
|
||||||
panic("unexpected serializable in watch")
|
panic("unexpected serializable in watch")
|
||||||
case ret.countOnly:
|
case ret.countOnly:
|
||||||
panic("unexpected countOnly in delete")
|
panic("unexpected countOnly in watch")
|
||||||
|
case ret.minModRev != 0, ret.maxModRev != 0:
|
||||||
|
panic("unexpected mod revision filter in watch")
|
||||||
|
case ret.minCreateRev != 0, ret.maxCreateRev != 0:
|
||||||
|
panic("unexpected create revision filter in watch")
|
||||||
}
|
}
|
||||||
return ret
|
return ret
|
||||||
}
|
}
|
||||||
@@ -181,6 +219,14 @@ func WithRev(rev int64) OpOption { return func(op *Op) { op.rev = rev } }
|
|||||||
// 'order' can be either 'SortNone', 'SortAscend', 'SortDescend'.
|
// 'order' can be either 'SortNone', 'SortAscend', 'SortDescend'.
|
||||||
func WithSort(target SortTarget, order SortOrder) OpOption {
|
func WithSort(target SortTarget, order SortOrder) OpOption {
|
||||||
return func(op *Op) {
|
return func(op *Op) {
|
||||||
|
if target == SortByKey && order == SortAscend {
|
||||||
|
// If order != SortNone, server fetches the entire key-space,
|
||||||
|
// and then applies the sort and limit, if provided.
|
||||||
|
// Since current mvcc.Range implementation returns results
|
||||||
|
// sorted by keys in lexicographically ascending order,
|
||||||
|
// client should ignore SortOrder if the target is SortByKey.
|
||||||
|
order = SortNone
|
||||||
|
}
|
||||||
op.sort = &SortOption{target, order}
|
op.sort = &SortOption{target, order}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -245,6 +291,18 @@ func WithCountOnly() OpOption {
|
|||||||
return func(op *Op) { op.countOnly = true }
|
return func(op *Op) { op.countOnly = true }
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// WithMinModRev filters out keys for Get with modification revisions less than the given revision.
|
||||||
|
func WithMinModRev(rev int64) OpOption { return func(op *Op) { op.minModRev = rev } }
|
||||||
|
|
||||||
|
// WithMaxModRev filters out keys for Get with modification revisions greater than the given revision.
|
||||||
|
func WithMaxModRev(rev int64) OpOption { return func(op *Op) { op.maxModRev = rev } }
|
||||||
|
|
||||||
|
// WithMinCreateRev filters out keys for Get with creation revisions less than the given revision.
|
||||||
|
func WithMinCreateRev(rev int64) OpOption { return func(op *Op) { op.minCreateRev = rev } }
|
||||||
|
|
||||||
|
// WithMaxCreateRev filters out keys for Get with creation revisions greater than the given revision.
|
||||||
|
func WithMaxCreateRev(rev int64) OpOption { return func(op *Op) { op.maxCreateRev = rev } }
|
||||||
|
|
||||||
// WithFirstCreate gets the key with the oldest creation revision in the request range.
|
// WithFirstCreate gets the key with the oldest creation revision in the request range.
|
||||||
func WithFirstCreate() []OpOption { return withTop(SortByCreateRevision, SortAscend) }
|
func WithFirstCreate() []OpOption { return withTop(SortByCreateRevision, SortAscend) }
|
||||||
|
|
||||||
@@ -268,7 +326,8 @@ func withTop(target SortTarget, order SortOrder) []OpOption {
|
|||||||
return []OpOption{WithPrefix(), WithSort(target, order), WithLimit(1)}
|
return []OpOption{WithPrefix(), WithSort(target, order), WithLimit(1)}
|
||||||
}
|
}
|
||||||
|
|
||||||
// WithProgressNotify makes watch server send periodic progress updates.
|
// WithProgressNotify makes watch server send periodic progress updates
|
||||||
|
// every 10 minutes when there is no incoming events.
|
||||||
// Progress updates have zero events in WatchResponse.
|
// Progress updates have zero events in WatchResponse.
|
||||||
func WithProgressNotify() OpOption {
|
func WithProgressNotify() OpOption {
|
||||||
return func(op *Op) {
|
return func(op *Op) {
|
||||||
@@ -276,6 +335,23 @@ func WithProgressNotify() OpOption {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// WithCreatedNotify makes watch server sends the created event.
|
||||||
|
func WithCreatedNotify() OpOption {
|
||||||
|
return func(op *Op) {
|
||||||
|
op.createdNotify = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithFilterPut discards PUT events from the watcher.
|
||||||
|
func WithFilterPut() OpOption {
|
||||||
|
return func(op *Op) { op.filterPut = true }
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithFilterDelete discards DELETE events from the watcher.
|
||||||
|
func WithFilterDelete() OpOption {
|
||||||
|
return func(op *Op) { op.filterDelete = true }
|
||||||
|
}
|
||||||
|
|
||||||
// WithPrevKV gets the previous key-value pair before the event happens. If the previous KV is already compacted,
|
// WithPrevKV gets the previous key-value pair before the event happens. If the previous KV is already compacted,
|
||||||
// nothing will be returned.
|
// nothing will be returned.
|
||||||
func WithPrevKV() OpOption {
|
func WithPrevKV() OpOption {
|
||||||
@@ -283,3 +359,32 @@ func WithPrevKV() OpOption {
|
|||||||
op.prevKV = true
|
op.prevKV = true
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// LeaseOp represents an Operation that lease can execute.
|
||||||
|
type LeaseOp struct {
|
||||||
|
id LeaseID
|
||||||
|
|
||||||
|
// for TimeToLive
|
||||||
|
attachedKeys bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// LeaseOption configures lease operations.
|
||||||
|
type LeaseOption func(*LeaseOp)
|
||||||
|
|
||||||
|
func (op *LeaseOp) applyOpts(opts []LeaseOption) {
|
||||||
|
for _, opt := range opts {
|
||||||
|
opt(op)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithAttachedKeys requests lease timetolive API to return
|
||||||
|
// attached keys of given lease ID.
|
||||||
|
func WithAttachedKeys() LeaseOption {
|
||||||
|
return func(op *LeaseOp) { op.attachedKeys = true }
|
||||||
|
}
|
||||||
|
|
||||||
|
func toLeaseTimeToLiveRequest(id LeaseID, opts ...LeaseOption) *pb.LeaseTimeToLiveRequest {
|
||||||
|
ret := &LeaseOp{id: id}
|
||||||
|
ret.applyOpts(opts)
|
||||||
|
return &pb.LeaseTimeToLiveRequest{ID: int64(id), Keys: ret.attachedKeys}
|
||||||
|
}
|
||||||
|
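The options added in this file (WithMinModRev, WithMaxModRev, WithMinCreateRev, WithMaxCreateRev, WithCreatedNotify, WithFilterPut, WithFilterDelete, WithAttachedKeys) are the clientv3 API surface this dependency bump pulls in. A minimal usage sketch, not part of the vendored diff; the endpoint, key names, and TTL below are illustrative assumptions:

package main

import (
	"fmt"
	"time"

	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

func main() {
	// Assumed local etcd endpoint; adjust for a real cluster.
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"127.0.0.1:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		panic(err)
	}
	defer cli.Close()
	ctx := context.Background()

	// Range limited by the new revision filters: keys under "foo" created at or
	// after revision 5 and last modified at or before revision 100.
	resp, err := cli.Get(ctx, "foo", clientv3.WithPrefix(),
		clientv3.WithMinCreateRev(5), clientv3.WithMaxModRev(100))
	if err != nil {
		panic(err)
	}
	fmt.Println("matched keys:", len(resp.Kvs))

	// Lease TimeToLive can now report the keys attached to the lease.
	lease, err := cli.Grant(ctx, 10)
	if err != nil {
		panic(err)
	}
	ttl, err := cli.TimeToLive(ctx, lease.ID, clientv3.WithAttachedKeys())
	if err != nil {
		panic(err)
	}
	fmt.Println("remaining TTL:", ttl.TTL, "attached keys:", len(ttl.Keys))
}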
120  vendor/github.com/coreos/etcd/clientv3/retry.go  generated  vendored
@@ -15,68 +15,117 @@
package clientv3

import (
+	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
	"golang.org/x/net/context"
	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
)

type rpcFunc func(ctx context.Context) error
-type retryRpcFunc func(context.Context, rpcFunc)
+type retryRpcFunc func(context.Context, rpcFunc) error

func (c *Client) newRetryWrapper() retryRpcFunc {
-	return func(rpcCtx context.Context, f rpcFunc) {
+	return func(rpcCtx context.Context, f rpcFunc) error {
		for {
			err := f(rpcCtx)
-			// ignore grpc conn closing on fail-fast calls; they are transient errors
-			if err == nil || !isConnClosing(err) {
-				return
+			if err == nil {
+				return nil
			}
+
+			eErr := rpctypes.Error(err)
+			// always stop retry on etcd errors
+			if _, ok := eErr.(rpctypes.EtcdError); ok {
+				return err
+			}
+
+			// only retry if unavailable
+			if grpc.Code(err) != codes.Unavailable {
+				return err
+			}
+
			select {
			case <-c.balancer.ConnectNotify():
			case <-rpcCtx.Done():
+				return rpcCtx.Err()
			case <-c.ctx.Done():
-				return
+				return c.ctx.Err()
			}
		}
	}
}

-type retryKVClient struct {
-	pb.KVClient
-	retryf retryRpcFunc
+func (c *Client) newAuthRetryWrapper() retryRpcFunc {
+	return func(rpcCtx context.Context, f rpcFunc) error {
+		for {
+			err := f(rpcCtx)
+			if err == nil {
+				return nil
+			}
+
+			// always stop retry on etcd errors other than invalid auth token
+			if rpctypes.Error(err) == rpctypes.ErrInvalidAuthToken {
+				gterr := c.getToken(rpcCtx)
+				if gterr != nil {
+					return err // return the original error for simplicity
+				}
+				continue
+			}
+
+			return err
+		}
+	}
}

// RetryKVClient implements a KVClient that uses the client's FailFast retry policy.
func RetryKVClient(c *Client) pb.KVClient {
-	return &retryKVClient{pb.NewKVClient(c.conn), c.retryWrapper}
+	retryWrite := &retryWriteKVClient{pb.NewKVClient(c.conn), c.retryWrapper}
+	return &retryKVClient{&retryWriteKVClient{retryWrite, c.retryAuthWrapper}}
}

-func (rkv *retryKVClient) Put(ctx context.Context, in *pb.PutRequest, opts ...grpc.CallOption) (resp *pb.PutResponse, err error) {
-	rkv.retryf(ctx, func(rctx context.Context) error {
+type retryKVClient struct {
+	*retryWriteKVClient
+}
+
+func (rkv *retryKVClient) Range(ctx context.Context, in *pb.RangeRequest, opts ...grpc.CallOption) (resp *pb.RangeResponse, err error) {
+	err = rkv.retryf(ctx, func(rctx context.Context) error {
+		resp, err = rkv.retryWriteKVClient.Range(rctx, in, opts...)
+		return err
+	})
+	return resp, err
+}
+
+type retryWriteKVClient struct {
+	pb.KVClient
+	retryf retryRpcFunc
+}
+
+func (rkv *retryWriteKVClient) Put(ctx context.Context, in *pb.PutRequest, opts ...grpc.CallOption) (resp *pb.PutResponse, err error) {
+	err = rkv.retryf(ctx, func(rctx context.Context) error {
		resp, err = rkv.KVClient.Put(rctx, in, opts...)
		return err
	})
	return resp, err
}

-func (rkv *retryKVClient) DeleteRange(ctx context.Context, in *pb.DeleteRangeRequest, opts ...grpc.CallOption) (resp *pb.DeleteRangeResponse, err error) {
-	rkv.retryf(ctx, func(rctx context.Context) error {
+func (rkv *retryWriteKVClient) DeleteRange(ctx context.Context, in *pb.DeleteRangeRequest, opts ...grpc.CallOption) (resp *pb.DeleteRangeResponse, err error) {
+	err = rkv.retryf(ctx, func(rctx context.Context) error {
		resp, err = rkv.KVClient.DeleteRange(rctx, in, opts...)
		return err
	})
	return resp, err
}

-func (rkv *retryKVClient) Txn(ctx context.Context, in *pb.TxnRequest, opts ...grpc.CallOption) (resp *pb.TxnResponse, err error) {
-	rkv.retryf(ctx, func(rctx context.Context) error {
+func (rkv *retryWriteKVClient) Txn(ctx context.Context, in *pb.TxnRequest, opts ...grpc.CallOption) (resp *pb.TxnResponse, err error) {
	err = rkv.retryf(ctx, func(rctx context.Context) error {
		resp, err = rkv.KVClient.Txn(rctx, in, opts...)
		return err
	})
	return resp, err
}

-func (rkv *retryKVClient) Compact(ctx context.Context, in *pb.CompactionRequest, opts ...grpc.CallOption) (resp *pb.CompactionResponse, err error) {
-	rkv.retryf(ctx, func(rctx context.Context) error {
+func (rkv *retryWriteKVClient) Compact(ctx context.Context, in *pb.CompactionRequest, opts ...grpc.CallOption) (resp *pb.CompactionResponse, err error) {
+	err = rkv.retryf(ctx, func(rctx context.Context) error {
		resp, err = rkv.KVClient.Compact(rctx, in, opts...)
		return err
	})
@@ -90,11 +139,12 @@ type retryLeaseClient struct {

// RetryLeaseClient implements a LeaseClient that uses the client's FailFast retry policy.
func RetryLeaseClient(c *Client) pb.LeaseClient {
-	return &retryLeaseClient{pb.NewLeaseClient(c.conn), c.retryWrapper}
+	retry := &retryLeaseClient{pb.NewLeaseClient(c.conn), c.retryWrapper}
+	return &retryLeaseClient{retry, c.retryAuthWrapper}
}

func (rlc *retryLeaseClient) LeaseGrant(ctx context.Context, in *pb.LeaseGrantRequest, opts ...grpc.CallOption) (resp *pb.LeaseGrantResponse, err error) {
-	rlc.retryf(ctx, func(rctx context.Context) error {
+	err = rlc.retryf(ctx, func(rctx context.Context) error {
		resp, err = rlc.LeaseClient.LeaseGrant(rctx, in, opts...)
		return err
	})
@@ -103,7 +153,7 @@ func (rlc *retryLeaseClient) LeaseGrant(ctx context.Context, in *pb.LeaseGrantRe
}

func (rlc *retryLeaseClient) LeaseRevoke(ctx context.Context, in *pb.LeaseRevokeRequest, opts ...grpc.CallOption) (resp *pb.LeaseRevokeResponse, err error) {
-	rlc.retryf(ctx, func(rctx context.Context) error {
+	err = rlc.retryf(ctx, func(rctx context.Context) error {
		resp, err = rlc.LeaseClient.LeaseRevoke(rctx, in, opts...)
		return err
	})
@@ -121,7 +171,7 @@ func RetryClusterClient(c *Client) pb.ClusterClient {
}

func (rcc *retryClusterClient) MemberAdd(ctx context.Context, in *pb.MemberAddRequest, opts ...grpc.CallOption) (resp *pb.MemberAddResponse, err error) {
-	rcc.retryf(ctx, func(rctx context.Context) error {
+	err = rcc.retryf(ctx, func(rctx context.Context) error {
		resp, err = rcc.ClusterClient.MemberAdd(rctx, in, opts...)
		return err
	})
@@ -129,7 +179,7 @@ func (rcc *retryClusterClient) MemberAdd(ctx context.Context, in *pb.MemberAddRe
}

func (rcc *retryClusterClient) MemberRemove(ctx context.Context, in *pb.MemberRemoveRequest, opts ...grpc.CallOption) (resp *pb.MemberRemoveResponse, err error) {
-	rcc.retryf(ctx, func(rctx context.Context) error {
+	err = rcc.retryf(ctx, func(rctx context.Context) error {
		resp, err = rcc.ClusterClient.MemberRemove(rctx, in, opts...)
		return err
	})
@@ -137,7 +187,7 @@ func (rcc *retryClusterClient) MemberRemove(ctx context.Context, in *pb.MemberRe
}

func (rcc *retryClusterClient) MemberUpdate(ctx context.Context, in *pb.MemberUpdateRequest, opts ...grpc.CallOption) (resp *pb.MemberUpdateResponse, err error) {
-	rcc.retryf(ctx, func(rctx context.Context) error {
+	err = rcc.retryf(ctx, func(rctx context.Context) error {
		resp, err = rcc.ClusterClient.MemberUpdate(rctx, in, opts...)
		return err
	})
@@ -155,7 +205,7 @@ func RetryAuthClient(c *Client) pb.AuthClient {
}

func (rac *retryAuthClient) AuthEnable(ctx context.Context, in *pb.AuthEnableRequest, opts ...grpc.CallOption) (resp *pb.AuthEnableResponse, err error) {
-	rac.retryf(ctx, func(rctx context.Context) error {
+	err = rac.retryf(ctx, func(rctx context.Context) error {
		resp, err = rac.AuthClient.AuthEnable(rctx, in, opts...)
		return err
	})
@@ -163,7 +213,7 @@ func (rac *retryAuthClient) AuthEnable(ctx context.Context, in *pb.AuthEnableReq
}

func (rac *retryAuthClient) AuthDisable(ctx context.Context, in *pb.AuthDisableRequest, opts ...grpc.CallOption) (resp *pb.AuthDisableResponse, err error) {
-	rac.retryf(ctx, func(rctx context.Context) error {
+	err = rac.retryf(ctx, func(rctx context.Context) error {
		resp, err = rac.AuthClient.AuthDisable(rctx, in, opts...)
		return err
	})
@@ -171,7 +221,7 @@ func (rac *retryAuthClient) AuthDisable(ctx context.Context, in *pb.AuthDisableR
}

func (rac *retryAuthClient) UserAdd(ctx context.Context, in *pb.AuthUserAddRequest, opts ...grpc.CallOption) (resp *pb.AuthUserAddResponse, err error) {
-	rac.retryf(ctx, func(rctx context.Context) error {
+	err = rac.retryf(ctx, func(rctx context.Context) error {
		resp, err = rac.AuthClient.UserAdd(rctx, in, opts...)
		return err
	})
@@ -179,7 +229,7 @@ func (rac *retryAuthClient) UserAdd(ctx context.Context, in *pb.AuthUserAddReque
}

func (rac *retryAuthClient) UserDelete(ctx context.Context, in *pb.AuthUserDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthUserDeleteResponse, err error) {
-	rac.retryf(ctx, func(rctx context.Context) error {
+	err = rac.retryf(ctx, func(rctx context.Context) error {
		resp, err = rac.AuthClient.UserDelete(rctx, in, opts...)
		return err
	})
@@ -187,7 +237,7 @@ func (rac *retryAuthClient) UserDelete(ctx context.Context, in *pb.AuthUserDelet
}

func (rac *retryAuthClient) UserChangePassword(ctx context.Context, in *pb.AuthUserChangePasswordRequest, opts ...grpc.CallOption) (resp *pb.AuthUserChangePasswordResponse, err error) {
-	rac.retryf(ctx, func(rctx context.Context) error {
+	err = rac.retryf(ctx, func(rctx context.Context) error {
		resp, err = rac.AuthClient.UserChangePassword(rctx, in, opts...)
		return err
	})
@@ -195,7 +245,7 @@ func (rac *retryAuthClient) UserChangePassword(ctx context.Context, in *pb.AuthU
}

func (rac *retryAuthClient) UserGrantRole(ctx context.Context, in *pb.AuthUserGrantRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserGrantRoleResponse, err error) {
-	rac.retryf(ctx, func(rctx context.Context) error {
+	err = rac.retryf(ctx, func(rctx context.Context) error {
		resp, err = rac.AuthClient.UserGrantRole(rctx, in, opts...)
		return err
	})
@@ -203,7 +253,7 @@ func (rac *retryAuthClient) UserGrantRole(ctx context.Context, in *pb.AuthUserGr
}

func (rac *retryAuthClient) UserRevokeRole(ctx context.Context, in *pb.AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserRevokeRoleResponse, err error) {
-	rac.retryf(ctx, func(rctx context.Context) error {
+	err = rac.retryf(ctx, func(rctx context.Context) error {
		resp, err = rac.AuthClient.UserRevokeRole(rctx, in, opts...)
		return err
	})
@@ -211,7 +261,7 @@ func (rac *retryAuthClient) UserRevokeRole(ctx context.Context, in *pb.AuthUserR
}

func (rac *retryAuthClient) RoleAdd(ctx context.Context, in *pb.AuthRoleAddRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleAddResponse, err error) {
-	rac.retryf(ctx, func(rctx context.Context) error {
+	err = rac.retryf(ctx, func(rctx context.Context) error {
		resp, err = rac.AuthClient.RoleAdd(rctx, in, opts...)
		return err
	})
@@ -219,7 +269,7 @@ func (rac *retryAuthClient) RoleAdd(ctx context.Context, in *pb.AuthRoleAddReque
}

func (rac *retryAuthClient) RoleDelete(ctx context.Context, in *pb.AuthRoleDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleDeleteResponse, err error) {
-	rac.retryf(ctx, func(rctx context.Context) error {
+	err = rac.retryf(ctx, func(rctx context.Context) error {
		resp, err = rac.AuthClient.RoleDelete(rctx, in, opts...)
		return err
	})
@@ -227,7 +277,7 @@ func (rac *retryAuthClient) RoleDelete(ctx context.Context, in *pb.AuthRoleDelet
}

func (rac *retryAuthClient) RoleGrantPermission(ctx context.Context, in *pb.AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleGrantPermissionResponse, err error) {
-	rac.retryf(ctx, func(rctx context.Context) error {
+	err = rac.retryf(ctx, func(rctx context.Context) error {
		resp, err = rac.AuthClient.RoleGrantPermission(rctx, in, opts...)
		return err
	})
@@ -235,7 +285,7 @@ func (rac *retryAuthClient) RoleGrantPermission(ctx context.Context, in *pb.Auth
}

func (rac *retryAuthClient) RoleRevokePermission(ctx context.Context, in *pb.AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleRevokePermissionResponse, err error) {
-	rac.retryf(ctx, func(rctx context.Context) error {
+	err = rac.retryf(ctx, func(rctx context.Context) error {
		resp, err = rac.AuthClient.RoleRevokePermission(rctx, in, opts...)
		return err
	})
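The rewritten wrapper above changes the client's retry policy: an RPC is retried only while the failure is a gRPC Unavailable error, etcd-level errors stop retries immediately, and context cancellation is now returned to the caller instead of being swallowed. A standalone sketch of that policy, not the vendored code; waitReconnect here stands in for the client's balancer notification channel:

package main

import (
	"errors"
	"fmt"

	"golang.org/x/net/context"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
)

// retryUnavailable keeps calling f while it fails with codes.Unavailable,
// waiting for a reconnect signal between attempts; any other error (or a
// done context) ends the loop.
func retryUnavailable(ctx context.Context, waitReconnect <-chan struct{}, f func(context.Context) error) error {
	for {
		err := f(ctx)
		if err == nil {
			return nil
		}
		if grpc.Code(err) != codes.Unavailable {
			return err // permanent failure: do not retry
		}
		select {
		case <-waitReconnect: // connection re-established, try again
		case <-ctx.Done():
			return ctx.Err()
		}
	}
}

func main() {
	reconnected := make(chan struct{}, 1)
	reconnected <- struct{}{}
	calls := 0
	err := retryUnavailable(context.Background(), reconnected, func(context.Context) error {
		calls++
		if calls == 1 {
			return grpc.Errorf(codes.Unavailable, "transient endpoint failure")
		}
		return errors.New("application error, not retried")
	})
	fmt.Println("calls:", calls, "final error:", err)
}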
8  vendor/github.com/coreos/etcd/clientv3/txn.go  generated  vendored
@@ -19,6 +19,7 @@ import (

	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
	"golang.org/x/net/context"
+	"google.golang.org/grpc"
)

// Txn is the interface that wraps mini-transactions.
@@ -152,7 +153,12 @@ func (txn *txn) Commit() (*TxnResponse, error) {

func (txn *txn) commit() (*TxnResponse, error) {
	r := &pb.TxnRequest{Compare: txn.cmps, Success: txn.sus, Failure: txn.fas}
-	resp, err := txn.kv.remote.Txn(txn.ctx, r)
+
+	var opts []grpc.CallOption
+	if !txn.isWrite {
+		opts = []grpc.CallOption{grpc.FailFast(false)}
+	}
+	resp, err := txn.kv.remote.Txn(txn.ctx, r, opts...)
	if err != nil {
		return nil, err
	}
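The change above only affects how read-only transactions are sent (FailFast is disabled so they can wait out a leader change); the caller-facing Txn API is unchanged. A small sketch, assuming a connected *clientv3.Client and the imports from the earlier sketch; the key name is illustrative:

// readOnlyTxn runs a transaction whose branches are all reads, which is the
// case the new FailFast(false) call option applies to.
func readOnlyTxn(ctx context.Context, cli *clientv3.Client) (bool, error) {
	getFoo := clientv3.OpGet("foo")
	resp, err := cli.Txn(ctx).
		If(clientv3.Compare(clientv3.ModRevision("foo"), ">", 0)).
		Then(getFoo).
		Else(getFoo).
		Commit()
	if err != nil {
		return false, err
	}
	return resp.Succeeded, nil
}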
83  vendor/github.com/coreos/etcd/clientv3/watch.go  generated  vendored
@@ -61,8 +61,8 @@ type WatchResponse struct {
	// the channel sends a final response that has Canceled set to true with a non-nil Err().
	Canceled bool

-	// created is used to indicate the creation of the watcher.
-	created bool
+	// Created is used to indicate the creation of the watcher.
+	Created bool

	closeErr error
}
@@ -92,7 +92,7 @@ func (wr *WatchResponse) Err() error {

// IsProgressNotify returns true if the WatchResponse is progress notification.
func (wr *WatchResponse) IsProgressNotify() bool {
-	return len(wr.Events) == 0 && !wr.Canceled && !wr.created && wr.CompactRevision == 0 && wr.Header.Revision != 0
+	return len(wr.Events) == 0 && !wr.Canceled && !wr.Created && wr.CompactRevision == 0 && wr.Header.Revision != 0
}

// watcher implements the Watcher interface
@@ -101,6 +101,7 @@ type watcher struct {

	// mu protects the grpc streams map
	mu sync.RWMutex

	// streams holds all the active grpc streams keyed by ctx value.
	streams map[string]*watchGrpcStream
}
@@ -131,6 +132,8 @@ type watchGrpcStream struct {
	errc chan error
	// closingc gets the watcherStream of closing watchers
	closingc chan *watcherStream
+	// wg is Done when all substream goroutines have exited
+	wg sync.WaitGroup

	// resumec closes to signal that all substreams should begin resuming
	resumec chan struct{}
@@ -144,8 +147,12 @@ type watchRequest struct {
	key string
	end string
	rev int64
-	// progressNotify is for progress updates.
+	// send created notification event if this field is true
+	createdNotify bool
+	// progressNotify is for progress updates
	progressNotify bool
+	// filters is the list of events to filter out
+	filters []pb.WatchCreateRequest_FilterType
	// get the previous key-value pair before the event happens
	prevKV bool
	// retc receives a chan WatchResponse once the watcher is established
@@ -173,8 +180,12 @@ type watcherStream struct {
}

func NewWatcher(c *Client) Watcher {
+	return NewWatchFromWatchClient(pb.NewWatchClient(c.conn))
+}
+
+func NewWatchFromWatchClient(wc pb.WatchClient) Watcher {
	return &watcher{
-		remote:  pb.NewWatchClient(c.conn),
+		remote:  wc,
		streams: make(map[string]*watchGrpcStream),
	}
}
@@ -215,12 +226,22 @@ func (w *watcher) newWatcherGrpcStream(inctx context.Context) *watchGrpcStream {
func (w *watcher) Watch(ctx context.Context, key string, opts ...OpOption) WatchChan {
	ow := opWatch(key, opts...)

+	var filters []pb.WatchCreateRequest_FilterType
+	if ow.filterPut {
+		filters = append(filters, pb.WatchCreateRequest_NOPUT)
+	}
+	if ow.filterDelete {
+		filters = append(filters, pb.WatchCreateRequest_NODELETE)
+	}
+
	wr := &watchRequest{
		ctx:            ctx,
+		createdNotify:  ow.createdNotify,
		key:            string(ow.key),
		end:            string(ow.end),
		rev:            ow.rev,
		progressNotify: ow.progressNotify,
+		filters:        filters,
		prevKV:         ow.prevKV,
		retc:           make(chan chan WatchResponse, 1),
	}
@@ -374,18 +395,20 @@ func (w *watchGrpcStream) run() {
		for _, ws := range w.substreams {
			if _, ok := closing[ws]; !ok {
				close(ws.recvc)
+				closing[ws] = struct{}{}
			}
		}
		for _, ws := range w.resuming {
			if _, ok := closing[ws]; ws != nil && !ok {
				close(ws.recvc)
+				closing[ws] = struct{}{}
			}
		}
		w.joinSubstreams()
-		for toClose := len(w.substreams) + len(w.resuming); toClose > 0; toClose-- {
+		for range closing {
			w.closeSubstream(<-w.closingc)
		}
+		w.wg.Wait()
		w.owner.closeStream(w)
	}()

@@ -410,6 +433,7 @@ func (w *watchGrpcStream) run() {
			}

			ws.donec = make(chan struct{})
+			w.wg.Add(1)
			go w.serveSubstream(ws, w.resumec)

			// queue up for watcher creation/resume
@@ -458,7 +482,7 @@ func (w *watchGrpcStream) run() {
			}
		// watch client failed to recv; spawn another if possible
		case err := <-w.errc:
-			if toErr(w.ctx, err) == v3rpc.ErrNoLeader {
+			if isHaltErr(w.ctx, err) || toErr(w.ctx, err) == v3rpc.ErrNoLeader {
				closeErr = err
				return
			}
@@ -508,7 +532,7 @@ func (w *watchGrpcStream) dispatchEvent(pbresp *pb.WatchResponse) bool {
		Header:          *pbresp.Header,
		Events:          events,
		CompactRevision: pbresp.CompactRevision,
-		created:         pbresp.Created,
+		Created:         pbresp.Created,
		Canceled:        pbresp.Canceled,
	}
	select {
@@ -555,6 +579,7 @@ func (w *watchGrpcStream) serveSubstream(ws *watcherStream, resumec chan struct{
		if !resuming {
			w.closingc <- ws
		}
+		w.wg.Done()
	}()

	emptyWr := &WatchResponse{}
@@ -562,14 +587,6 @@ func (w *watchGrpcStream) serveSubstream(ws *watcherStream, resumec chan struct{
		curWr := emptyWr
		outc := ws.outc
-
-		if len(ws.buf) > 0 && ws.buf[0].created {
-			select {
-			case ws.initReq.retc <- ws.outc:
-			default:
-			}
-			ws.buf = ws.buf[1:]
-		}

		if len(ws.buf) > 0 {
			curWr = ws.buf[0]
		} else {
@@ -587,13 +604,35 @@ func (w *watchGrpcStream) serveSubstream(ws *watcherStream, resumec chan struct{
			// shutdown from closeSubstream
				return
			}
-			// TODO pause channel if buffer gets too large
-			ws.buf = append(ws.buf, wr)
+
+			if wr.Created {
+				if ws.initReq.retc != nil {
+					ws.initReq.retc <- ws.outc
+					// to prevent next write from taking the slot in buffered channel
+					// and posting duplicate create events
+					ws.initReq.retc = nil
+
+					// send first creation event only if requested
+					if ws.initReq.createdNotify {
+						ws.outc <- *wr
+					}
+				}
+			}
+
			nextRev = wr.Header.Revision
			if len(wr.Events) > 0 {
				nextRev = wr.Events[len(wr.Events)-1].Kv.ModRevision + 1
			}
			ws.initReq.rev = nextRev
+
+			// created event is already sent above,
+			// watcher should not post duplicate events
+			if wr.Created {
+				continue
+			}
+
+			// TODO pause channel if buffer gets too large
+			ws.buf = append(ws.buf, wr)
		case <-w.ctx.Done():
			return
		case <-ws.initReq.ctx.Done():
@@ -639,6 +678,7 @@ func (w *watchGrpcStream) newWatchClient() (pb.Watch_WatchClient, error) {
			continue
		}
		ws.donec = make(chan struct{})
+		w.wg.Add(1)
		go w.serveSubstream(ws, w.resumec)
	}

@@ -659,6 +699,10 @@ func (w *watchGrpcStream) waitCancelSubstreams(stopc <-chan struct{}) <-chan str
		go func(ws *watcherStream) {
			defer wg.Done()
			if ws.closing {
+				if ws.initReq.ctx.Err() != nil && ws.outc != nil {
+					close(ws.outc)
+					ws.outc = nil
+				}
				return
			}
			select {
@@ -719,6 +763,7 @@ func (wr *watchRequest) toPB() *pb.WatchRequest {
		Key:            []byte(wr.key),
		RangeEnd:       []byte(wr.end),
		ProgressNotify: wr.progressNotify,
+		Filters:        wr.filters,
		PrevKv:         wr.prevKV,
	}
	cr := &pb.WatchRequest_CreateRequest{CreateRequest: req}
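From the caller's side, the watch changes above surface as the exported Created field plus the WithCreatedNotify, WithFilterPut, and WithFilterDelete options. A sketch, assuming the client and imports from the first example; the key prefix is illustrative:

// watchDeletesOnly streams only DELETE events under a prefix and uses the
// created notification to report when the watcher is registered server-side.
func watchDeletesOnly(ctx context.Context, cli *clientv3.Client) {
	wch := cli.Watch(ctx, "jobs/", clientv3.WithPrefix(),
		clientv3.WithFilterPut(), clientv3.WithCreatedNotify())
	for resp := range wch {
		if resp.Created {
			fmt.Println("watch established at revision", resp.Header.Revision)
			continue
		}
		for _, ev := range resp.Events {
			fmt.Printf("%s %s\n", ev.Type, ev.Kv.Key)
		}
	}
}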
2  vendor/github.com/coreos/etcd/compactor/compactor.go  generated  vendored
@@ -26,7 +26,7 @@ import (
)

var (
-	plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver")
+	plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "compactor")
)

const (
16  vendor/github.com/coreos/etcd/discovery/discovery.go  generated  vendored
@@ -52,7 +52,8 @@ var (

var (
	// Number of retries discovery will attempt before giving up and erroring out.
	nRetries = uint(math.MaxUint32)
+	maxExpoentialRetries = uint(8)
)

// JoinCluster will connect to the discovery service at the given url, and
@@ -243,7 +244,7 @@ func (d *discovery) checkCluster() ([]*client.Node, int, uint64, error) {
		}
		return nil, 0, 0, err
	}
-	nodes := make([]*client.Node, 0)
+	var nodes []*client.Node
	// append non-config keys to nodes
	for _, n := range resp.Node.Nodes {
		if !(path.Base(n.Key) == path.Base(configKey)) {
@@ -268,9 +269,14 @@ func (d *discovery) checkCluster() ([]*client.Node, int, uint64, error) {

func (d *discovery) logAndBackoffForRetry(step string) {
	d.retries++
-	retryTime := time.Second * (0x1 << d.retries)
-	plog.Infof("%s: error connecting to %s, retrying in %s", step, d.url, retryTime)
-	d.clock.Sleep(retryTime)
+	// logAndBackoffForRetry stops exponential backoff when the retries are more than maxExpoentialRetries and is set to a constant backoff afterward.
+	retries := d.retries
+	if retries > maxExpoentialRetries {
+		retries = maxExpoentialRetries
+	}
+	retryTimeInSecond := time.Duration(0x1<<retries) * time.Second
+	plog.Infof("%s: error connecting to %s, retrying in %s", step, d.url, retryTimeInSecond)
+	d.clock.Sleep(retryTimeInSecond)
}

func (d *discovery) checkClusterRetry() ([]*client.Node, int, uint64, error) {
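The discovery change above caps the backoff exponent at maxExpoentialRetries instead of letting 1<<retries keep doubling on a long-failing discovery URL. A standalone sketch of the same capped policy; the function name and the cap of 8 are ours, mirroring the vendored constant:

package main

import (
	"fmt"
	"time"
)

// cappedBackoff returns 2^retries seconds but never more than 2^maxExp
// seconds, so the wait stops growing after maxExp failed attempts.
func cappedBackoff(retries, maxExp uint) time.Duration {
	if retries > maxExp {
		retries = maxExp
	}
	return time.Duration(1<<retries) * time.Second
}

func main() {
	for _, r := range []uint{1, 4, 8, 20, 40} {
		fmt.Printf("retry %2d -> wait %v\n", r, cappedBackoff(r, 8))
	}
}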
33  vendor/github.com/coreos/etcd/discovery/srv.go  generated  vendored
@@ -17,6 +17,7 @@ package discovery
import (
	"fmt"
	"net"
+	"net/url"
	"strings"

	"github.com/coreos/etcd/pkg/types"
@@ -33,9 +34,8 @@ var (
// Also doesn't do any lookups for the token (though it could)
// Also sees each entry as a separate instance.
func SRVGetCluster(name, dns string, defaultToken string, apurls types.URLs) (string, string, error) {
-	stringParts := make([]string, 0)
	tempName := int(0)
-	tcpAPUrls := make([]string, 0)
+	tcp2ap := make(map[string]url.URL)

	// First, resolve the apurls
	for _, url := range apurls {
@@ -44,10 +44,11 @@ func SRVGetCluster(name, dns string, defaultToken string, apurls types.URLs) (st
			plog.Errorf("couldn't resolve host %s during SRV discovery", url.Host)
			return "", "", err
		}
-		tcpAPUrls = append(tcpAPUrls, tcpAddr.String())
+		tcp2ap[tcpAddr.String()] = url
	}

-	updateNodeMap := func(service, prefix string) error {
+	stringParts := []string{}
+	updateNodeMap := func(service, scheme string) error {
		_, addrs, err := lookupSRV(service, "tcp", dns)
		if err != nil {
			return err
@@ -61,35 +62,37 @@ func SRVGetCluster(name, dns string, defaultToken string, apurls types.URLs) (st
				continue
			}
			n := ""
-			for _, url := range tcpAPUrls {
-				if url == tcpAddr.String() {
+			url, ok := tcp2ap[tcpAddr.String()]
+			if ok {
				n = name
-				}
			}
			if n == "" {
				n = fmt.Sprintf("%d", tempName)
-				tempName += 1
+				tempName++
			}
			// SRV records have a trailing dot but URL shouldn't.
			shortHost := strings.TrimSuffix(srv.Target, ".")
			urlHost := net.JoinHostPort(shortHost, port)
-			stringParts = append(stringParts, fmt.Sprintf("%s=%s%s", n, prefix, urlHost))
-			plog.Noticef("got bootstrap from DNS for %s at %s%s", service, prefix, urlHost)
+			stringParts = append(stringParts, fmt.Sprintf("%s=%s://%s", n, scheme, urlHost))
+			plog.Noticef("got bootstrap from DNS for %s at %s://%s", service, scheme, urlHost)
+			if ok && url.Scheme != scheme {
+				plog.Errorf("bootstrap at %s from DNS for %s has scheme mismatch with expected peer %s", scheme+"://"+urlHost, service, url.String())
+			}
		}
		return nil
	}

	failCount := 0
-	err := updateNodeMap("etcd-server-ssl", "https://")
+	err := updateNodeMap("etcd-server-ssl", "https")
	srvErr := make([]string, 2)
	if err != nil {
		srvErr[0] = fmt.Sprintf("error querying DNS SRV records for _etcd-server-ssl %s", err)
-		failCount += 1
+		failCount++
	}
-	err = updateNodeMap("etcd-server", "http://")
+	err = updateNodeMap("etcd-server", "http")
	if err != nil {
		srvErr[1] = fmt.Sprintf("error querying DNS SRV records for _etcd-server %s", err)
-		failCount += 1
+		failCount++
	}
	if failCount == 2 {
		plog.Warningf(srvErr[0])
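The SRV bootstrap now carries the scheme separately so it can flag peers whose advertised URL scheme does not match the record that resolved them. A standalone sketch of the lookup-and-format step, not the vendored function; the service, domain, and scheme are illustrative, and naming records by index mirrors only the fallback numbering:

package main

import (
	"fmt"
	"net"
	"strings"
)

// srvBootstrap resolves _<service>._tcp.<domain> and formats each record the
// way etcd's SRV discovery builds its initial-cluster string:
// "<name>=<scheme>://<host>:<port>".
func srvBootstrap(service, domain, scheme string) ([]string, error) {
	_, addrs, err := net.LookupSRV(service, "tcp", domain)
	if err != nil {
		return nil, err
	}
	var parts []string
	for i, srv := range addrs {
		// SRV targets carry a trailing dot that URLs should not.
		host := strings.TrimSuffix(srv.Target, ".")
		hostPort := net.JoinHostPort(host, fmt.Sprintf("%d", srv.Port))
		parts = append(parts, fmt.Sprintf("%d=%s://%s", i, scheme, hostPort))
	}
	return parts, nil
}

func main() {
	parts, err := srvBootstrap("etcd-server-ssl", "example.com", "https")
	if err != nil {
		fmt.Println("SRV lookup failed:", err)
		return
	}
	fmt.Println(strings.Join(parts, ","))
}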
5  vendor/github.com/coreos/etcd/etcdserver/api/capability.go  generated  vendored
@@ -30,15 +30,14 @@ const (
)

var (
-	plog = capnslog.NewPackageLogger("github.com/coreos/etcd/etcdserver", "api")
+	plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver/api")

	// capabilityMaps is a static map of version to capability map.
	// the base capabilities is the set of capability 2.0 supports.
	capabilityMaps = map[string]map[Capability]bool{
-		"2.1.0": {AuthCapability: true},
-		"2.2.0": {AuthCapability: true},
		"2.3.0": {AuthCapability: true},
		"3.0.0": {AuthCapability: true, V3rpcCapability: true},
+		"3.1.0": {AuthCapability: true, V3rpcCapability: true},
	}

	enableMapMu sync.RWMutex
104  vendor/github.com/coreos/etcd/etcdserver/api/v2http/client.go  generated  vendored
@@ -21,7 +21,6 @@ import (
	"fmt"
	"io/ioutil"
	"net/http"
-	"net/http/pprof"
	"net/url"
	"path"
	"strconv"
@@ -57,7 +56,6 @@ const (
	healthPath  = "/health"
	versionPath = "/version"
	configPath  = "/config"
-	pprofPrefix = "/debug/pprof"
)

// NewClientHandler generates a muxed http.Handler with the given parameters to serve etcd client requests.
@@ -113,23 +111,6 @@ func NewClientHandler(server *etcdserver.EtcdServer, timeout time.Duration) http
	mux.Handle(deprecatedMachinesPrefix, dmh)
	handleAuth(mux, sech)
-
-	if server.IsPprofEnabled() {
-		plog.Infof("pprof is enabled under %s", pprofPrefix)
-
-		mux.HandleFunc(pprofPrefix, pprof.Index)
-		mux.HandleFunc(pprofPrefix+"/profile", pprof.Profile)
-		mux.HandleFunc(pprofPrefix+"/symbol", pprof.Symbol)
-		mux.HandleFunc(pprofPrefix+"/cmdline", pprof.Cmdline)
-		// TODO: currently, we don't create an entry for pprof.Trace,
-		// because go 1.4 doesn't provide it. After support of go 1.4 is dropped,
-		// we should add the entry.
-
-		mux.Handle(pprofPrefix+"/heap", pprof.Handler("heap"))
-		mux.Handle(pprofPrefix+"/goroutine", pprof.Handler("goroutine"))
-		mux.Handle(pprofPrefix+"/threadcreate", pprof.Handler("threadcreate"))
-		mux.Handle(pprofPrefix+"/block", pprof.Handler("block"))
-	}

	return requestLogger(mux)
}
@@ -153,7 +134,7 @@ func (h *keysHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	defer cancel()
	clock := clockwork.NewRealClock()
	startTime := clock.Now()
-	rr, err := parseKeyRequest(r, clock)
+	rr, noValueOnSuccess, err := parseKeyRequest(r, clock)
	if err != nil {
		writeKeyError(w, err)
		return
@@ -175,7 +156,7 @@ func (h *keysHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	}
	switch {
	case resp.Event != nil:
-		if err := writeKeyEvent(w, resp.Event, h.timer); err != nil {
+		if err := writeKeyEvent(w, resp.Event, noValueOnSuccess, h.timer); err != nil {
			// Should never be reached
			plog.Errorf("error writing event (%v)", err)
		}
@@ -365,32 +346,23 @@ func serveVars(w http.ResponseWriter, r *http.Request) {
	fmt.Fprintf(w, "\n}\n")
}

-// TODO: change etcdserver to raft interface when we have it.
-// add test for healthHandler when we have the interface ready.
func healthHandler(server *etcdserver.EtcdServer) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		if !allowMethod(w, r.Method, "GET") {
			return
		}

		if uint64(server.Leader()) == raft.None {
			http.Error(w, `{"health": "false"}`, http.StatusServiceUnavailable)
			return
		}

-		// wait for raft's progress
-		index := server.Index()
-		for i := 0; i < 3; i++ {
-			time.Sleep(250 * time.Millisecond)
-			if server.Index() > index {
-				w.WriteHeader(http.StatusOK)
-				w.Write([]byte(`{"health": "true"}`))
-				return
-			}
+		ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+		defer cancel()
+		if _, err := server.Do(ctx, etcdserverpb.Request{Method: "QGET"}); err != nil {
+			http.Error(w, `{"health": "false"}`, http.StatusServiceUnavailable)
+			return
		}

-		http.Error(w, `{"health": "false"}`, http.StatusServiceUnavailable)
-		return
+		w.WriteHeader(http.StatusOK)
+		w.Write([]byte(`{"health": "true"}`))
	}
}
@@ -449,19 +421,20 @@ func logHandleFunc(w http.ResponseWriter, r *http.Request) {
// parseKeyRequest converts a received http.Request on keysPrefix to
// a server Request, performing validation of supplied fields as appropriate.
// If any validation fails, an empty Request and non-nil error is returned.
-func parseKeyRequest(r *http.Request, clock clockwork.Clock) (etcdserverpb.Request, error) {
+func parseKeyRequest(r *http.Request, clock clockwork.Clock) (etcdserverpb.Request, bool, error) {
+	noValueOnSuccess := false
	emptyReq := etcdserverpb.Request{}

	err := r.ParseForm()
	if err != nil {
-		return emptyReq, etcdErr.NewRequestError(
+		return emptyReq, false, etcdErr.NewRequestError(
			etcdErr.EcodeInvalidForm,
			err.Error(),
		)
	}

	if !strings.HasPrefix(r.URL.Path, keysPrefix) {
-		return emptyReq, etcdErr.NewRequestError(
+		return emptyReq, false, etcdErr.NewRequestError(
			etcdErr.EcodeInvalidForm,
			"incorrect key prefix",
		)
@@ -470,13 +443,13 @@ func parseKeyRequest(r *http.Request, clock clockwork.Clock) (etcdserverpb.Reque

	var pIdx, wIdx uint64
	if pIdx, err = getUint64(r.Form, "prevIndex"); err != nil {
-		return emptyReq, etcdErr.NewRequestError(
+		return emptyReq, false, etcdErr.NewRequestError(
			etcdErr.EcodeIndexNaN,
			`invalid value for "prevIndex"`,
		)
	}
	if wIdx, err = getUint64(r.Form, "waitIndex"); err != nil {
-		return emptyReq, etcdErr.NewRequestError(
+		return emptyReq, false, etcdErr.NewRequestError(
			etcdErr.EcodeIndexNaN,
			`invalid value for "waitIndex"`,
		)
@@ -484,45 +457,45 @@ func parseKeyRequest(r *http.Request, clock clockwork.Clock) (etcdserverpb.Reque

	var rec, sort, wait, dir, quorum, stream bool
	if rec, err = getBool(r.Form, "recursive"); err != nil {
-		return emptyReq, etcdErr.NewRequestError(
+		return emptyReq, false, etcdErr.NewRequestError(
			etcdErr.EcodeInvalidField,
			`invalid value for "recursive"`,
		)
	}
	if sort, err = getBool(r.Form, "sorted"); err != nil {
-		return emptyReq, etcdErr.NewRequestError(
+		return emptyReq, false, etcdErr.NewRequestError(
			etcdErr.EcodeInvalidField,
			`invalid value for "sorted"`,
		)
	}
	if wait, err = getBool(r.Form, "wait"); err != nil {
-		return emptyReq, etcdErr.NewRequestError(
+		return emptyReq, false, etcdErr.NewRequestError(
			etcdErr.EcodeInvalidField,
			`invalid value for "wait"`,
		)
	}
	// TODO(jonboulle): define what parameters dir is/isn't compatible with?
	if dir, err = getBool(r.Form, "dir"); err != nil {
-		return emptyReq, etcdErr.NewRequestError(
+		return emptyReq, false, etcdErr.NewRequestError(
			etcdErr.EcodeInvalidField,
			`invalid value for "dir"`,
		)
	}
	if quorum, err = getBool(r.Form, "quorum"); err != nil {
-		return emptyReq, etcdErr.NewRequestError(
+		return emptyReq, false, etcdErr.NewRequestError(
			etcdErr.EcodeInvalidField,
			`invalid value for "quorum"`,
		)
	}
	if stream, err = getBool(r.Form, "stream"); err != nil {
-		return emptyReq, etcdErr.NewRequestError(
+		return emptyReq, false, etcdErr.NewRequestError(
			etcdErr.EcodeInvalidField,
			`invalid value for "stream"`,
		)
	}

	if wait && r.Method != "GET" {
-		return emptyReq, etcdErr.NewRequestError(
+		return emptyReq, false, etcdErr.NewRequestError(
			etcdErr.EcodeInvalidField,
			`"wait" can only be used with GET requests`,
		)
@@ -530,19 +503,26 @@ func parseKeyRequest(r *http.Request, clock clockwork.Clock) (etcdserverpb.Reque

	pV := r.FormValue("prevValue")
	if _, ok := r.Form["prevValue"]; ok && pV == "" {
-		return emptyReq, etcdErr.NewRequestError(
|
return emptyReq, false, etcdErr.NewRequestError(
|
||||||
etcdErr.EcodePrevValueRequired,
|
etcdErr.EcodePrevValueRequired,
|
||||||
`"prevValue" cannot be empty`,
|
`"prevValue" cannot be empty`,
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||

	if noValueOnSuccess, err = getBool(r.Form, "noValueOnSuccess"); err != nil {
		return emptyReq, false, etcdErr.NewRequestError(
			etcdErr.EcodeInvalidField,
			`invalid value for "noValueOnSuccess"`,
		)
	}

// TTL is nullable, so leave it null if not specified
|
// TTL is nullable, so leave it null if not specified
|
||||||
// or an empty string
|
// or an empty string
|
||||||
var ttl *uint64
|
var ttl *uint64
|
||||||
if len(r.FormValue("ttl")) > 0 {
|
if len(r.FormValue("ttl")) > 0 {
|
||||||
i, err := getUint64(r.Form, "ttl")
|
i, err := getUint64(r.Form, "ttl")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return emptyReq, etcdErr.NewRequestError(
|
return emptyReq, false, etcdErr.NewRequestError(
|
||||||
etcdErr.EcodeTTLNaN,
|
etcdErr.EcodeTTLNaN,
|
||||||
`invalid value for "ttl"`,
|
`invalid value for "ttl"`,
|
||||||
)
|
)
|
||||||
@@ -555,7 +535,7 @@ func parseKeyRequest(r *http.Request, clock clockwork.Clock) (etcdserverpb.Reque
|
|||||||
if _, ok := r.Form["prevExist"]; ok {
|
if _, ok := r.Form["prevExist"]; ok {
|
||||||
bv, err := getBool(r.Form, "prevExist")
|
bv, err := getBool(r.Form, "prevExist")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return emptyReq, etcdErr.NewRequestError(
|
return emptyReq, false, etcdErr.NewRequestError(
|
||||||
etcdErr.EcodeInvalidField,
|
etcdErr.EcodeInvalidField,
|
||||||
"invalid value for prevExist",
|
"invalid value for prevExist",
|
||||||
)
|
)
|
||||||
@@ -568,7 +548,7 @@ func parseKeyRequest(r *http.Request, clock clockwork.Clock) (etcdserverpb.Reque
|
|||||||
if _, ok := r.Form["refresh"]; ok {
|
if _, ok := r.Form["refresh"]; ok {
|
||||||
bv, err := getBool(r.Form, "refresh")
|
bv, err := getBool(r.Form, "refresh")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return emptyReq, etcdErr.NewRequestError(
|
return emptyReq, false, etcdErr.NewRequestError(
|
||||||
etcdErr.EcodeInvalidField,
|
etcdErr.EcodeInvalidField,
|
||||||
"invalid value for refresh",
|
"invalid value for refresh",
|
||||||
)
|
)
|
||||||
@@ -577,13 +557,13 @@ func parseKeyRequest(r *http.Request, clock clockwork.Clock) (etcdserverpb.Reque
|
|||||||
if refresh != nil && *refresh {
|
if refresh != nil && *refresh {
|
||||||
val := r.FormValue("value")
|
val := r.FormValue("value")
|
||||||
if _, ok := r.Form["value"]; ok && val != "" {
|
if _, ok := r.Form["value"]; ok && val != "" {
|
||||||
return emptyReq, etcdErr.NewRequestError(
|
return emptyReq, false, etcdErr.NewRequestError(
|
||||||
etcdErr.EcodeRefreshValue,
|
etcdErr.EcodeRefreshValue,
|
||||||
`A value was provided on a refresh`,
|
`A value was provided on a refresh`,
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
if ttl == nil {
|
if ttl == nil {
|
||||||
return emptyReq, etcdErr.NewRequestError(
|
return emptyReq, false, etcdErr.NewRequestError(
|
||||||
etcdErr.EcodeRefreshTTLRequired,
|
etcdErr.EcodeRefreshTTLRequired,
|
||||||
`No TTL value set`,
|
`No TTL value set`,
|
||||||
)
|
)
|
||||||
@@ -621,13 +601,13 @@ func parseKeyRequest(r *http.Request, clock clockwork.Clock) (etcdserverpb.Reque
|
|||||||
rr.Expiration = clock.Now().Add(expr).UnixNano()
|
rr.Expiration = clock.Now().Add(expr).UnixNano()
|
||||||
}
|
}
|
||||||
|
|
||||||
return rr, nil
|
return rr, noValueOnSuccess, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// writeKeyEvent trims the prefix of key path in a single Event under
|
// writeKeyEvent trims the prefix of key path in a single Event under
|
||||||
// StoreKeysPrefix, serializes it and writes the resulting JSON to the given
|
// StoreKeysPrefix, serializes it and writes the resulting JSON to the given
|
||||||
// ResponseWriter, along with the appropriate headers.
|
// ResponseWriter, along with the appropriate headers.
|
||||||
func writeKeyEvent(w http.ResponseWriter, ev *store.Event, rt etcdserver.RaftTimer) error {
|
func writeKeyEvent(w http.ResponseWriter, ev *store.Event, noValueOnSuccess bool, rt etcdserver.RaftTimer) error {
|
||||||
if ev == nil {
|
if ev == nil {
|
||||||
return errors.New("cannot write empty Event!")
|
return errors.New("cannot write empty Event!")
|
||||||
}
|
}
|
||||||
@@ -641,6 +621,12 @@ func writeKeyEvent(w http.ResponseWriter, ev *store.Event, rt etcdserver.RaftTim
|
|||||||
}
|
}
|
||||||

	ev = trimEventPrefix(ev, etcdserver.StoreKeysPrefix)
	ev = trimEventPrefix(ev, etcdserver.StoreKeysPrefix)
	if noValueOnSuccess &&
		(ev.Action == store.Set || ev.Action == store.CompareAndSwap ||
			ev.Action == store.Create || ev.Action == store.Update) {
		ev.Node = nil
		ev.PrevNode = nil
	}
return json.NewEncoder(w).Encode(ev)
|
return json.NewEncoder(w).Encode(ev)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -747,6 +733,10 @@ func trimErrorPrefix(err error, prefix string) error {
|
|||||||
|
|
||||||
func unmarshalRequest(r *http.Request, req json.Unmarshaler, w http.ResponseWriter) bool {
|
func unmarshalRequest(r *http.Request, req json.Unmarshaler, w http.ResponseWriter) bool {
|
||||||
	ctype := r.Header.Get("Content-Type")
	ctype := r.Header.Get("Content-Type")
	semicolonPosition := strings.Index(ctype, ";")
	if semicolonPosition != -1 {
		ctype = strings.TrimSpace(strings.ToLower(ctype[0:semicolonPosition]))
	}
if ctype != "application/json" {
|
if ctype != "application/json" {
|
||||||
writeError(w, r, httptypes.NewHTTPError(http.StatusUnsupportedMediaType, fmt.Sprintf("Bad Content-Type %s, accept application/json", ctype)))
|
writeError(w, r, httptypes.NewHTTPError(http.StatusUnsupportedMediaType, fmt.Sprintf("Bad Content-Type %s, accept application/json", ctype)))
|
||||||
return false
|
return false
|
||||||
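Illustration only (not in the vendored files): the noValueOnSuccess query parameter parsed above lets a v2 client ask the server to omit node and prevNode from successful Set/Create/Update/CompareAndSwap responses, which keeps replies small for large values. A hedged sketch of a PUT using it, assuming a member at 127.0.0.1:2379:

	package main

	import (
		"fmt"
		"io/ioutil"
		"net/http"
		"net/url"
		"strings"
	)

	func main() {
		form := url.Values{"value": {strings.Repeat("x", 1<<20)}} // a deliberately large value
		req, err := http.NewRequest(http.MethodPut,
			"http://127.0.0.1:2379/v2/keys/big?noValueOnSuccess=true", // assumption: default client URL
			strings.NewReader(form.Encode()))
		if err != nil {
			panic(err)
		}
		req.Header.Set("Content-Type", "application/x-www-form-urlencoded")

		resp, err := http.DefaultClient.Do(req)
		if err != nil {
			panic(err)
		}
		defer resp.Body.Close()

		// With noValueOnSuccess=true the success event comes back without the
		// node/prevNode payload, so this body stays small.
		body, _ := ioutil.ReadAll(resp.Body)
		fmt.Println(string(body))
	}
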

4  vendor/github.com/coreos/etcd/etcdserver/api/v2http/http.go  generated  vendored
@@ -35,7 +35,7 @@ const (
|
|||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
plog = capnslog.NewPackageLogger("github.com/coreos/etcd/etcdserver/api", "v2http")
|
plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver/api/v2http")
|
||||||
mlog = logutil.NewMergeLogger(plog)
|
mlog = logutil.NewMergeLogger(plog)
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -60,7 +60,7 @@ func writeError(w http.ResponseWriter, r *http.Request, err error) {
|
|||||||
}
|
}
|
||||||
default:
|
default:
|
||||||
switch err {
|
switch err {
|
||||||
case etcdserver.ErrTimeoutDueToLeaderFail, etcdserver.ErrTimeoutDueToConnectionLost, etcdserver.ErrNotEnoughStartedMembers:
|
case etcdserver.ErrTimeoutDueToLeaderFail, etcdserver.ErrTimeoutDueToConnectionLost, etcdserver.ErrNotEnoughStartedMembers, etcdserver.ErrUnhealthy:
|
||||||
mlog.MergeError(err)
|
mlog.MergeError(err)
|
||||||
default:
|
default:
|
||||||
mlog.MergeErrorf("got unexpected response error (%v)", err)
|
mlog.MergeErrorf("got unexpected response error (%v)", err)
|
||||||

2  vendor/github.com/coreos/etcd/etcdserver/api/v2http/httptypes/errors.go  generated  vendored
@@ -22,7 +22,7 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
plog = capnslog.NewPackageLogger("github.com/coreos/etcd/etcdserver/api/v2http", "httptypes")
|
plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver/api/v2http/httptypes")
|
||||||
)
|
)
|
||||||
|
|
||||||
type HTTPError struct {
|
type HTTPError struct {
|
||||||

9  vendor/github.com/coreos/etcd/etcdserver/api/v2http/peer.go  generated  vendored
@@ -26,14 +26,14 @@ import (

const (
const (
	peerMembersPrefix = "/members"
	peerMembersPrefix = "/members"
	leasesPrefix = "/leases"
)
)

// NewPeerHandler generates an http.Handler to handle etcd peer requests.
// NewPeerHandler generates an http.Handler to handle etcd peer requests.
func NewPeerHandler(s *etcdserver.EtcdServer) http.Handler {
func NewPeerHandler(s *etcdserver.EtcdServer) http.Handler {
	var lh http.Handler
	var lh http.Handler
	if l := s.Lessor(); l != nil {
	l := s.Lessor()
		lh = leasehttp.NewHandler(l)
	if l != nil {
		lh = leasehttp.NewHandler(l, func() <-chan struct{} { return s.ApplyWait() })
	}
	}
	return newPeerHandler(s.Cluster(), s.RaftHandler(), lh)
	return newPeerHandler(s.Cluster(), s.RaftHandler(), lh)
}
}
@@ -49,7 +49,8 @@ func newPeerHandler(cluster api.Cluster, raftHandler http.Handler, leaseHandler
	mux.Handle(rafthttp.RaftPrefix+"/", raftHandler)
	mux.Handle(rafthttp.RaftPrefix+"/", raftHandler)
	mux.Handle(peerMembersPrefix, mh)
	mux.Handle(peerMembersPrefix, mh)
	if leaseHandler != nil {
	if leaseHandler != nil {
		mux.Handle(leasesPrefix, leaseHandler)
		mux.Handle(leasehttp.LeasePrefix, leaseHandler)
		mux.Handle(leasehttp.LeaseInternalPrefix, leaseHandler)
	}
	}
	mux.HandleFunc(versionPath, versionHandler(cluster, serveVersion))
	mux.HandleFunc(versionPath, versionHandler(cluster, serveVersion))
	return mux
	return mux
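Aside, not from the diff: leasehttp.NewHandler now also receives a function returning the server's apply-wait channel, so lease keep-alives forwarded from followers are not answered until the local member has caught up on applied entries. The general shape of such a gate, as a standalone sketch with illustrative names:

	package main

	import (
		"net/http"
		"time"
	)

	// gated wraps h so each request waits for the applied() channel to close
	// (or times out), roughly the behaviour the peer lease handler gets from
	// the server's ApplyWait function.
	func gated(h http.Handler, applied func() <-chan struct{}) http.Handler {
		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			select {
			case <-applied():
				h.ServeHTTP(w, r)
			case <-time.After(time.Second):
				http.Error(w, "not ready", http.StatusServiceUnavailable)
			}
		})
	}

	func main() {
		ready := make(chan struct{})
		close(ready) // pretend the member is caught up
		ok := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) })
		http.Handle("/leases", gated(ok, func() <-chan struct{} { return ready }))
		// http.ListenAndServe(":8080", nil) // sketch only; left commented out
	}
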

3  vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/grpc.go  generated  vendored
@@ -19,14 +19,13 @@ import (
|
|||||||
|
|
||||||
"github.com/coreos/etcd/etcdserver"
|
"github.com/coreos/etcd/etcdserver"
|
||||||
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
|
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
|
||||||
"github.com/coreos/pkg/capnslog"
|
|
||||||
"google.golang.org/grpc"
|
"google.golang.org/grpc"
|
||||||
"google.golang.org/grpc/credentials"
|
"google.golang.org/grpc/credentials"
|
||||||
"google.golang.org/grpc/grpclog"
|
"google.golang.org/grpc/grpclog"
|
||||||
)
|
)
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
grpclog.SetLogger(capnslog.NewPackageLogger("github.com/coreos/etcd/etcdserver", "v3rpc/grpc"))
|
grpclog.SetLogger(plog)
|
||||||
}
|
}
|
||||||
|
|
||||||
func Server(s *etcdserver.EtcdServer, tls *tls.Config) *grpc.Server {
|
func Server(s *etcdserver.EtcdServer, tls *tls.Config) *grpc.Server {
|
||||||

42  vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/interceptor.go  generated  vendored
@@ -15,7 +15,6 @@
|
|||||||
package v3rpc
|
package v3rpc
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"strings"
|
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
@@ -25,6 +24,7 @@ import (
|
|||||||
"github.com/coreos/etcd/pkg/types"
|
"github.com/coreos/etcd/pkg/types"
|
||||||
"github.com/coreos/etcd/raft"
|
"github.com/coreos/etcd/raft"
|
||||||
|
|
||||||
|
prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
|
||||||
"golang.org/x/net/context"
|
"golang.org/x/net/context"
|
||||||
"google.golang.org/grpc"
|
"google.golang.org/grpc"
|
||||||
"google.golang.org/grpc/metadata"
|
"google.golang.org/grpc/metadata"
|
||||||
@@ -53,7 +53,8 @@ func newUnaryInterceptor(s *etcdserver.EtcdServer) grpc.UnaryServerInterceptor {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return metricsUnaryInterceptor(ctx, req, info, handler)
|
|
||||||
|
return prometheus.UnaryServerInterceptor(ctx, req, info, handler)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -88,44 +89,11 @@ func newStreamInterceptor(s *etcdserver.EtcdServer) grpc.StreamServerInterceptor
|
|||||||
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return metricsStreamInterceptor(srv, ss, info, handler)
|
|
||||||
|
return prometheus.StreamServerInterceptor(srv, ss, info, handler)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func metricsUnaryInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) {
|
|
||||||
service, method := splitMethodName(info.FullMethod)
|
|
||||||
receivedCounter.WithLabelValues(service, method).Inc()
|
|
||||||
|
|
||||||
start := time.Now()
|
|
||||||
resp, err = handler(ctx, req)
|
|
||||||
if err != nil {
|
|
||||||
failedCounter.WithLabelValues(service, method, grpc.Code(err).String()).Inc()
|
|
||||||
}
|
|
||||||
handlingDuration.WithLabelValues(service, method).Observe(time.Since(start).Seconds())
|
|
||||||
|
|
||||||
return resp, err
|
|
||||||
}
|
|
||||||
|
|
||||||
func metricsStreamInterceptor(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
|
|
||||||
service, method := splitMethodName(info.FullMethod)
|
|
||||||
receivedCounter.WithLabelValues(service, method).Inc()
|
|
||||||
|
|
||||||
err := handler(srv, ss)
|
|
||||||
if err != nil {
|
|
||||||
failedCounter.WithLabelValues(service, method, grpc.Code(err).String()).Inc()
|
|
||||||
}
|
|
||||||
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
func splitMethodName(fullMethodName string) (string, string) {
|
|
||||||
fullMethodName = strings.TrimPrefix(fullMethodName, "/") // remove leading slash
|
|
||||||
if i := strings.Index(fullMethodName, "/"); i >= 0 {
|
|
||||||
return fullMethodName[:i], fullMethodName[i+1:]
|
|
||||||
}
|
|
||||||
return "unknown", "unknown"
|
|
||||||
}
|
|
||||||
|
|
||||||
type serverStreamWithCtx struct {
|
type serverStreamWithCtx struct {
|
||||||
grpc.ServerStream
|
grpc.ServerStream
|
||||||
ctx context.Context
|
ctx context.Context
|
||||||

10  vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/key.go  generated  vendored
@@ -26,7 +26,7 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
plog = capnslog.NewPackageLogger("github.com/coreos/etcd/etcdserver/api", "v3rpc")
|
plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver/api/v3rpc")
|
||||||
|
|
||||||
// Max operations per txn list. For example, Txn.Success can have at most 128 operations,
|
// Max operations per txn list. For example, Txn.Success can have at most 128 operations,
|
||||||
// and Txn.Failure can have at most 128 operations.
|
// and Txn.Failure can have at most 128 operations.
|
||||||
@@ -56,7 +56,7 @@ func (s *kvServer) Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeResp
|
|||||||
plog.Panic("unexpected nil resp.Header")
|
plog.Panic("unexpected nil resp.Header")
|
||||||
}
|
}
|
||||||
s.hdr.fill(resp.Header)
|
s.hdr.fill(resp.Header)
|
||||||
return resp, err
|
return resp, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *kvServer) Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, error) {
|
func (s *kvServer) Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, error) {
|
||||||
@@ -73,7 +73,7 @@ func (s *kvServer) Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse,
|
|||||||
plog.Panic("unexpected nil resp.Header")
|
plog.Panic("unexpected nil resp.Header")
|
||||||
}
|
}
|
||||||
s.hdr.fill(resp.Header)
|
s.hdr.fill(resp.Header)
|
||||||
return resp, err
|
return resp, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *kvServer) DeleteRange(ctx context.Context, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) {
|
func (s *kvServer) DeleteRange(ctx context.Context, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) {
|
||||||
@@ -90,7 +90,7 @@ func (s *kvServer) DeleteRange(ctx context.Context, r *pb.DeleteRangeRequest) (*
|
|||||||
plog.Panic("unexpected nil resp.Header")
|
plog.Panic("unexpected nil resp.Header")
|
||||||
}
|
}
|
||||||
s.hdr.fill(resp.Header)
|
s.hdr.fill(resp.Header)
|
||||||
return resp, err
|
return resp, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *kvServer) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error) {
|
func (s *kvServer) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error) {
|
||||||
@@ -107,7 +107,7 @@ func (s *kvServer) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse,
|
|||||||
plog.Panic("unexpected nil resp.Header")
|
plog.Panic("unexpected nil resp.Header")
|
||||||
}
|
}
|
||||||
s.hdr.fill(resp.Header)
|
s.hdr.fill(resp.Header)
|
||||||
return resp, err
|
return resp, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *kvServer) Compact(ctx context.Context, r *pb.CompactionRequest) (*pb.CompactionResponse, error) {
|
func (s *kvServer) Compact(ctx context.Context, r *pb.CompactionRequest) (*pb.CompactionResponse, error) {
|
||||||

20  vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/lease.go  generated  vendored
@@ -18,7 +18,6 @@ import (

	"io"
	"io"

	"github.com/coreos/etcd/etcdserver"
	"github.com/coreos/etcd/etcdserver"
	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
	"github.com/coreos/etcd/lease"
	"github.com/coreos/etcd/lease"
	"golang.org/x/net/context"
	"golang.org/x/net/context"
@@ -35,20 +34,27 @@ func NewLeaseServer(s *etcdserver.EtcdServer) pb.LeaseServer {

func (ls *LeaseServer) LeaseGrant(ctx context.Context, cr *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) {
func (ls *LeaseServer) LeaseGrant(ctx context.Context, cr *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) {
	resp, err := ls.le.LeaseGrant(ctx, cr)
	resp, err := ls.le.LeaseGrant(ctx, cr)
	if err == lease.ErrLeaseExists {
		return nil, rpctypes.ErrGRPCLeaseExist
	}
	if err != nil {
	if err != nil {
		return nil, err
		return nil, togRPCError(err)
	}
	}
	ls.hdr.fill(resp.Header)
	ls.hdr.fill(resp.Header)
	return resp, err
	return resp, nil
}
}

func (ls *LeaseServer) LeaseRevoke(ctx context.Context, rr *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) {
func (ls *LeaseServer) LeaseRevoke(ctx context.Context, rr *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) {
	resp, err := ls.le.LeaseRevoke(ctx, rr)
	resp, err := ls.le.LeaseRevoke(ctx, rr)
	if err != nil {
	if err != nil {
		return nil, rpctypes.ErrGRPCLeaseNotFound
		return nil, togRPCError(err)
	}
	ls.hdr.fill(resp.Header)
	return resp, nil
}

func (ls *LeaseServer) LeaseTimeToLive(ctx context.Context, rr *pb.LeaseTimeToLiveRequest) (*pb.LeaseTimeToLiveResponse, error) {
	resp, err := ls.le.LeaseTimeToLive(ctx, rr)
	if err != nil {
		return nil, togRPCError(err)
	}
	}
	ls.hdr.fill(resp.Header)
	ls.hdr.fill(resp.Header)
	return resp, nil
	return resp, nil
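For context only: LeaseTimeToLive is a new RPC in this version bump; the handler above simply forwards to the lessor and funnels failures through togRPCError. A hedged clientv3 sketch of querying a lease's remaining TTL, assuming a reachable cluster and the matching clientv3 package:

	package main

	import (
		"fmt"
		"time"

		"github.com/coreos/etcd/clientv3"
		"golang.org/x/net/context"
	)

	func main() {
		cli, err := clientv3.New(clientv3.Config{
			Endpoints:   []string{"127.0.0.1:2379"}, // assumption: default client URL
			DialTimeout: 5 * time.Second,
		})
		if err != nil {
			panic(err)
		}
		defer cli.Close()

		ctx := context.Background()
		grant, err := cli.Grant(ctx, 30) // 30-second lease
		if err != nil {
			panic(err)
		}

		// TimeToLive is served by the LeaseTimeToLive RPC added above.
		ttl, err := cli.TimeToLive(ctx, grant.ID)
		if err != nil {
			panic(err)
		}
		fmt.Printf("lease %x: %d seconds remaining (granted %d)\n", ttl.ID, ttl.TTL, ttl.GrantedTTL)
	}
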

54  vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/maintenance.go  generated  vendored
@@ -18,6 +18,7 @@ import (
|
|||||||
"crypto/sha256"
|
"crypto/sha256"
|
||||||
"io"
|
"io"
|
||||||
|
|
||||||
|
"github.com/coreos/etcd/auth"
|
||||||
"github.com/coreos/etcd/etcdserver"
|
"github.com/coreos/etcd/etcdserver"
|
||||||
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
|
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
|
||||||
"github.com/coreos/etcd/mvcc"
|
"github.com/coreos/etcd/mvcc"
|
||||||
@@ -45,6 +46,10 @@ type RaftStatusGetter interface {
|
|||||||
Leader() types.ID
|
Leader() types.ID
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type AuthGetter interface {
|
||||||
|
AuthStore() auth.AuthStore
|
||||||
|
}
|
||||||
|
|
||||||
type maintenanceServer struct {
|
type maintenanceServer struct {
|
||||||
rg RaftStatusGetter
|
rg RaftStatusGetter
|
||||||
kg KVGetter
|
kg KVGetter
|
||||||
@@ -54,7 +59,8 @@ type maintenanceServer struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func NewMaintenanceServer(s *etcdserver.EtcdServer) pb.MaintenanceServer {
|
func NewMaintenanceServer(s *etcdserver.EtcdServer) pb.MaintenanceServer {
|
||||||
return &maintenanceServer{rg: s, kg: s, bg: s, a: s, hdr: newHeader(s)}
|
srv := &maintenanceServer{rg: s, kg: s, bg: s, a: s, hdr: newHeader(s)}
|
||||||
|
return &authMaintenanceServer{srv, s}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ms *maintenanceServer) Defragment(ctx context.Context, sr *pb.DefragmentRequest) (*pb.DefragmentResponse, error) {
|
func (ms *maintenanceServer) Defragment(ctx context.Context, sr *pb.DefragmentRequest) (*pb.DefragmentResponse, error) {
|
||||||
@@ -139,3 +145,49 @@ func (ms *maintenanceServer) Status(ctx context.Context, ar *pb.StatusRequest) (

	ms.hdr.fill(resp.Header)
	ms.hdr.fill(resp.Header)
	return resp, nil
	return resp, nil
}
}

type authMaintenanceServer struct {
	*maintenanceServer
	ag AuthGetter
}

func (ams *authMaintenanceServer) isAuthenticated(ctx context.Context) error {
	authInfo, err := ams.ag.AuthStore().AuthInfoFromCtx(ctx)
	if err != nil {
		return err
	}

	return ams.ag.AuthStore().IsAdminPermitted(authInfo)
}

func (ams *authMaintenanceServer) Defragment(ctx context.Context, sr *pb.DefragmentRequest) (*pb.DefragmentResponse, error) {
	if err := ams.isAuthenticated(ctx); err != nil {
		return nil, err
	}

	return ams.maintenanceServer.Defragment(ctx, sr)
}

func (ams *authMaintenanceServer) Snapshot(sr *pb.SnapshotRequest, srv pb.Maintenance_SnapshotServer) error {
	if err := ams.isAuthenticated(srv.Context()); err != nil {
		return err
	}

	return ams.maintenanceServer.Snapshot(sr, srv)
}

func (ams *authMaintenanceServer) Hash(ctx context.Context, r *pb.HashRequest) (*pb.HashResponse, error) {
	if err := ams.isAuthenticated(ctx); err != nil {
		return nil, err
	}

	return ams.maintenanceServer.Hash(ctx, r)
}

func (ams *authMaintenanceServer) Status(ctx context.Context, ar *pb.StatusRequest) (*pb.StatusResponse, error) {
	if err := ams.isAuthenticated(ctx); err != nil {
		return nil, err
	}

	return ams.maintenanceServer.Status(ctx, ar)
}

34  vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/member.go  generated  vendored
@@ -24,8 +24,6 @@ import (
|
|||||||
"github.com/coreos/etcd/etcdserver/membership"
|
"github.com/coreos/etcd/etcdserver/membership"
|
||||||
"github.com/coreos/etcd/pkg/types"
|
"github.com/coreos/etcd/pkg/types"
|
||||||
"golang.org/x/net/context"
|
"golang.org/x/net/context"
|
||||||
"google.golang.org/grpc"
|
|
||||||
"google.golang.org/grpc/codes"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
type ClusterServer struct {
|
type ClusterServer struct {
|
||||||
@@ -50,14 +48,8 @@ func (cs *ClusterServer) MemberAdd(ctx context.Context, r *pb.MemberAddRequest)
|
|||||||
|
|
||||||
now := time.Now()
|
now := time.Now()
|
||||||
m := membership.NewMember("", urls, "", &now)
|
m := membership.NewMember("", urls, "", &now)
|
||||||
err = cs.server.AddMember(ctx, *m)
|
if err = cs.server.AddMember(ctx, *m); err != nil {
|
||||||
switch {
|
return nil, togRPCError(err)
|
||||||
case err == membership.ErrIDExists:
|
|
||||||
return nil, rpctypes.ErrGRPCMemberExist
|
|
||||||
case err == membership.ErrPeerURLexists:
|
|
||||||
return nil, rpctypes.ErrGRPCPeerURLExist
|
|
||||||
case err != nil:
|
|
||||||
return nil, grpc.Errorf(codes.Internal, err.Error())
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return &pb.MemberAddResponse{
|
return &pb.MemberAddResponse{
|
||||||
@@ -67,16 +59,9 @@ func (cs *ClusterServer) MemberAdd(ctx context.Context, r *pb.MemberAddRequest)
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (cs *ClusterServer) MemberRemove(ctx context.Context, r *pb.MemberRemoveRequest) (*pb.MemberRemoveResponse, error) {
|
func (cs *ClusterServer) MemberRemove(ctx context.Context, r *pb.MemberRemoveRequest) (*pb.MemberRemoveResponse, error) {
|
||||||
err := cs.server.RemoveMember(ctx, r.ID)
|
if err := cs.server.RemoveMember(ctx, r.ID); err != nil {
|
||||||
switch {
|
return nil, togRPCError(err)
|
||||||
case err == membership.ErrIDRemoved:
|
|
||||||
fallthrough
|
|
||||||
case err == membership.ErrIDNotFound:
|
|
||||||
return nil, rpctypes.ErrGRPCMemberNotFound
|
|
||||||
case err != nil:
|
|
||||||
return nil, grpc.Errorf(codes.Internal, err.Error())
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return &pb.MemberRemoveResponse{Header: cs.header()}, nil
|
return &pb.MemberRemoveResponse{Header: cs.header()}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -85,16 +70,9 @@ func (cs *ClusterServer) MemberUpdate(ctx context.Context, r *pb.MemberUpdateReq
|
|||||||
ID: types.ID(r.ID),
|
ID: types.ID(r.ID),
|
||||||
RaftAttributes: membership.RaftAttributes{PeerURLs: r.PeerURLs},
|
RaftAttributes: membership.RaftAttributes{PeerURLs: r.PeerURLs},
|
||||||
}
|
}
|
||||||
err := cs.server.UpdateMember(ctx, m)
|
if err := cs.server.UpdateMember(ctx, m); err != nil {
|
||||||
switch {
|
return nil, togRPCError(err)
|
||||||
case err == membership.ErrPeerURLexists:
|
|
||||||
return nil, rpctypes.ErrGRPCPeerURLExist
|
|
||||||
case err == membership.ErrIDNotFound:
|
|
||||||
return nil, rpctypes.ErrGRPCMemberNotFound
|
|
||||||
case err != nil:
|
|
||||||
return nil, grpc.Errorf(codes.Internal, err.Error())
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return &pb.MemberUpdateResponse{Header: cs.header()}, nil
|
return &pb.MemberUpdateResponse{Header: cs.header()}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||

29  vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/metrics.go  generated  vendored
@@ -17,31 +17,6 @@ package v3rpc
|
|||||||
import "github.com/prometheus/client_golang/prometheus"
|
import "github.com/prometheus/client_golang/prometheus"
|
||||||
|
|
||||||
var (
|
var (
|
||||||
receivedCounter = prometheus.NewCounterVec(
|
|
||||||
prometheus.CounterOpts{
|
|
||||||
Namespace: "etcd",
|
|
||||||
Subsystem: "grpc",
|
|
||||||
Name: "requests_total",
|
|
||||||
Help: "Counter of received requests.",
|
|
||||||
}, []string{"grpc_service", "grpc_method"})
|
|
||||||
|
|
||||||
failedCounter = prometheus.NewCounterVec(
|
|
||||||
prometheus.CounterOpts{
|
|
||||||
Namespace: "etcd",
|
|
||||||
Subsystem: "grpc",
|
|
||||||
Name: "requests_failed_total",
|
|
||||||
Help: "Counter of failed requests.",
|
|
||||||
}, []string{"grpc_service", "grpc_method", "grpc_code"})
|
|
||||||
|
|
||||||
handlingDuration = prometheus.NewHistogramVec(
|
|
||||||
prometheus.HistogramOpts{
|
|
||||||
Namespace: "etcd",
|
|
||||||
Subsystem: "grpc",
|
|
||||||
Name: "unary_requests_duration_seconds",
|
|
||||||
Help: "Bucketed histogram of processing time (s) of handled unary (non-stream) requests.",
|
|
||||||
Buckets: prometheus.ExponentialBuckets(0.0005, 2, 13),
|
|
||||||
}, []string{"grpc_service", "grpc_method"})
|
|
||||||
|
|
||||||
sentBytes = prometheus.NewCounter(prometheus.CounterOpts{
|
sentBytes = prometheus.NewCounter(prometheus.CounterOpts{
|
||||||
Namespace: "etcd",
|
Namespace: "etcd",
|
||||||
Subsystem: "network",
|
Subsystem: "network",
|
||||||
@@ -58,10 +33,6 @@ var (
|
|||||||
)
|
)
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
prometheus.MustRegister(receivedCounter)
|
|
||||||
prometheus.MustRegister(failedCounter)
|
|
||||||
prometheus.MustRegister(handlingDuration)
|
|
||||||
|
|
||||||
prometheus.MustRegister(sentBytes)
|
prometheus.MustRegister(sentBytes)
|
||||||
prometheus.MustRegister(receivedBytes)
|
prometheus.MustRegister(receivedBytes)
|
||||||
}
|
}
|
||||||

45  vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/error.go  generated  vendored
@@ -31,23 +31,28 @@ var (
|
|||||||
ErrGRPCLeaseNotFound = grpc.Errorf(codes.NotFound, "etcdserver: requested lease not found")
|
ErrGRPCLeaseNotFound = grpc.Errorf(codes.NotFound, "etcdserver: requested lease not found")
|
||||||
ErrGRPCLeaseExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: lease already exists")
|
ErrGRPCLeaseExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: lease already exists")
|
||||||
|
|
||||||
ErrGRPCMemberExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: member ID already exist")
|
ErrGRPCMemberExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: member ID already exist")
|
||||||
ErrGRPCPeerURLExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: Peer URLs already exists")
|
ErrGRPCPeerURLExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: Peer URLs already exists")
|
||||||
ErrGRPCMemberBadURLs = grpc.Errorf(codes.InvalidArgument, "etcdserver: given member URLs are invalid")
|
ErrGRPCMemberNotEnoughStarted = grpc.Errorf(codes.FailedPrecondition, "etcdserver: re-configuration failed due to not enough started members")
|
||||||
ErrGRPCMemberNotFound = grpc.Errorf(codes.NotFound, "etcdserver: member not found")
|
ErrGRPCMemberBadURLs = grpc.Errorf(codes.InvalidArgument, "etcdserver: given member URLs are invalid")
|
||||||
|
ErrGRPCMemberNotFound = grpc.Errorf(codes.NotFound, "etcdserver: member not found")
|
||||||
|
|
||||||
ErrGRPCRequestTooLarge = grpc.Errorf(codes.InvalidArgument, "etcdserver: request is too large")
|
ErrGRPCRequestTooLarge = grpc.Errorf(codes.InvalidArgument, "etcdserver: request is too large")
|
||||||
|
ErrGRPCRequestTooManyRequests = grpc.Errorf(codes.ResourceExhausted, "etcdserver: too many requests")
|
||||||
|
|
||||||
ErrGRPCRootUserNotExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: root user does not exist")
|
ErrGRPCRootUserNotExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: root user does not exist")
|
||||||
ErrGRPCRootRoleNotExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: root user does not have root role")
|
ErrGRPCRootRoleNotExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: root user does not have root role")
|
||||||
ErrGRPCUserAlreadyExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: user name already exists")
|
ErrGRPCUserAlreadyExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: user name already exists")
|
||||||
|
ErrGRPCUserEmpty = grpc.Errorf(codes.InvalidArgument, "etcdserver: user name is empty")
|
||||||
ErrGRPCUserNotFound = grpc.Errorf(codes.FailedPrecondition, "etcdserver: user name not found")
|
ErrGRPCUserNotFound = grpc.Errorf(codes.FailedPrecondition, "etcdserver: user name not found")
|
||||||
ErrGRPCRoleAlreadyExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: role name already exists")
|
ErrGRPCRoleAlreadyExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: role name already exists")
|
||||||
ErrGRPCRoleNotFound = grpc.Errorf(codes.FailedPrecondition, "etcdserver: role name not found")
|
ErrGRPCRoleNotFound = grpc.Errorf(codes.FailedPrecondition, "etcdserver: role name not found")
|
||||||
ErrGRPCAuthFailed = grpc.Errorf(codes.InvalidArgument, "etcdserver: authentication failed, invalid user ID or password")
|
ErrGRPCAuthFailed = grpc.Errorf(codes.InvalidArgument, "etcdserver: authentication failed, invalid user ID or password")
|
||||||
ErrGRPCPermissionDenied = grpc.Errorf(codes.FailedPrecondition, "etcdserver: permission denied")
|
ErrGRPCPermissionDenied = grpc.Errorf(codes.PermissionDenied, "etcdserver: permission denied")
|
||||||
ErrGRPCRoleNotGranted = grpc.Errorf(codes.FailedPrecondition, "etcdserver: role is not granted to the user")
|
ErrGRPCRoleNotGranted = grpc.Errorf(codes.FailedPrecondition, "etcdserver: role is not granted to the user")
|
||||||
ErrGRPCPermissionNotGranted = grpc.Errorf(codes.FailedPrecondition, "etcdserver: permission is not granted to the role")
|
ErrGRPCPermissionNotGranted = grpc.Errorf(codes.FailedPrecondition, "etcdserver: permission is not granted to the role")
|
||||||
|
ErrGRPCAuthNotEnabled = grpc.Errorf(codes.FailedPrecondition, "etcdserver: authentication is not enabled")
|
||||||
|
ErrGRPCInvalidAuthToken = grpc.Errorf(codes.Unauthenticated, "etcdserver: invalid auth token")
|
||||||
|
|
||||||
ErrGRPCNoLeader = grpc.Errorf(codes.Unavailable, "etcdserver: no leader")
|
ErrGRPCNoLeader = grpc.Errorf(codes.Unavailable, "etcdserver: no leader")
|
||||||
ErrGRPCNotCapable = grpc.Errorf(codes.Unavailable, "etcdserver: not capable")
|
ErrGRPCNotCapable = grpc.Errorf(codes.Unavailable, "etcdserver: not capable")
|
||||||
@@ -68,16 +73,19 @@ var (
|
|||||||
grpc.ErrorDesc(ErrGRPCLeaseNotFound): ErrGRPCLeaseNotFound,
|
grpc.ErrorDesc(ErrGRPCLeaseNotFound): ErrGRPCLeaseNotFound,
|
||||||
grpc.ErrorDesc(ErrGRPCLeaseExist): ErrGRPCLeaseExist,
|
grpc.ErrorDesc(ErrGRPCLeaseExist): ErrGRPCLeaseExist,
|
||||||
|
|
||||||
grpc.ErrorDesc(ErrGRPCMemberExist): ErrGRPCMemberExist,
|
grpc.ErrorDesc(ErrGRPCMemberExist): ErrGRPCMemberExist,
|
||||||
grpc.ErrorDesc(ErrGRPCPeerURLExist): ErrGRPCPeerURLExist,
|
grpc.ErrorDesc(ErrGRPCPeerURLExist): ErrGRPCPeerURLExist,
|
||||||
grpc.ErrorDesc(ErrGRPCMemberBadURLs): ErrGRPCMemberBadURLs,
|
grpc.ErrorDesc(ErrGRPCMemberNotEnoughStarted): ErrGRPCMemberNotEnoughStarted,
|
||||||
grpc.ErrorDesc(ErrGRPCMemberNotFound): ErrGRPCMemberNotFound,
|
grpc.ErrorDesc(ErrGRPCMemberBadURLs): ErrGRPCMemberBadURLs,
|
||||||
|
grpc.ErrorDesc(ErrGRPCMemberNotFound): ErrGRPCMemberNotFound,
|
||||||
|
|
||||||
grpc.ErrorDesc(ErrGRPCRequestTooLarge): ErrGRPCRequestTooLarge,
|
grpc.ErrorDesc(ErrGRPCRequestTooLarge): ErrGRPCRequestTooLarge,
|
||||||
|
grpc.ErrorDesc(ErrGRPCRequestTooManyRequests): ErrGRPCRequestTooManyRequests,
|
||||||
|
|
||||||
grpc.ErrorDesc(ErrGRPCRootUserNotExist): ErrGRPCRootUserNotExist,
|
grpc.ErrorDesc(ErrGRPCRootUserNotExist): ErrGRPCRootUserNotExist,
|
||||||
grpc.ErrorDesc(ErrGRPCRootRoleNotExist): ErrGRPCRootRoleNotExist,
|
grpc.ErrorDesc(ErrGRPCRootRoleNotExist): ErrGRPCRootRoleNotExist,
|
||||||
grpc.ErrorDesc(ErrGRPCUserAlreadyExist): ErrGRPCUserAlreadyExist,
|
grpc.ErrorDesc(ErrGRPCUserAlreadyExist): ErrGRPCUserAlreadyExist,
|
||||||
|
grpc.ErrorDesc(ErrGRPCUserEmpty): ErrGRPCUserEmpty,
|
||||||
grpc.ErrorDesc(ErrGRPCUserNotFound): ErrGRPCUserNotFound,
|
grpc.ErrorDesc(ErrGRPCUserNotFound): ErrGRPCUserNotFound,
|
||||||
grpc.ErrorDesc(ErrGRPCRoleAlreadyExist): ErrGRPCRoleAlreadyExist,
|
grpc.ErrorDesc(ErrGRPCRoleAlreadyExist): ErrGRPCRoleAlreadyExist,
|
||||||
grpc.ErrorDesc(ErrGRPCRoleNotFound): ErrGRPCRoleNotFound,
|
grpc.ErrorDesc(ErrGRPCRoleNotFound): ErrGRPCRoleNotFound,
|
||||||
@@ -85,6 +93,8 @@ var (
|
|||||||
grpc.ErrorDesc(ErrGRPCPermissionDenied): ErrGRPCPermissionDenied,
|
grpc.ErrorDesc(ErrGRPCPermissionDenied): ErrGRPCPermissionDenied,
|
||||||
grpc.ErrorDesc(ErrGRPCRoleNotGranted): ErrGRPCRoleNotGranted,
|
grpc.ErrorDesc(ErrGRPCRoleNotGranted): ErrGRPCRoleNotGranted,
|
||||||
grpc.ErrorDesc(ErrGRPCPermissionNotGranted): ErrGRPCPermissionNotGranted,
|
grpc.ErrorDesc(ErrGRPCPermissionNotGranted): ErrGRPCPermissionNotGranted,
|
||||||
|
grpc.ErrorDesc(ErrGRPCAuthNotEnabled): ErrGRPCAuthNotEnabled,
|
||||||
|
grpc.ErrorDesc(ErrGRPCInvalidAuthToken): ErrGRPCInvalidAuthToken,
|
||||||
|
|
||||||
grpc.ErrorDesc(ErrGRPCNoLeader): ErrGRPCNoLeader,
|
grpc.ErrorDesc(ErrGRPCNoLeader): ErrGRPCNoLeader,
|
||||||
grpc.ErrorDesc(ErrGRPCNotCapable): ErrGRPCNotCapable,
|
grpc.ErrorDesc(ErrGRPCNotCapable): ErrGRPCNotCapable,
|
||||||
@@ -106,16 +116,19 @@ var (
|
|||||||
ErrLeaseNotFound = Error(ErrGRPCLeaseNotFound)
|
ErrLeaseNotFound = Error(ErrGRPCLeaseNotFound)
|
||||||
ErrLeaseExist = Error(ErrGRPCLeaseExist)
|
ErrLeaseExist = Error(ErrGRPCLeaseExist)
|
||||||
|
|
||||||
ErrMemberExist = Error(ErrGRPCMemberExist)
|
ErrMemberExist = Error(ErrGRPCMemberExist)
|
||||||
ErrPeerURLExist = Error(ErrGRPCPeerURLExist)
|
ErrPeerURLExist = Error(ErrGRPCPeerURLExist)
|
||||||
ErrMemberBadURLs = Error(ErrGRPCMemberBadURLs)
|
ErrMemberNotEnoughStarted = Error(ErrGRPCMemberNotEnoughStarted)
|
||||||
ErrMemberNotFound = Error(ErrGRPCMemberNotFound)
|
ErrMemberBadURLs = Error(ErrGRPCMemberBadURLs)
|
||||||
|
ErrMemberNotFound = Error(ErrGRPCMemberNotFound)
|
||||||
|
|
||||||
ErrRequestTooLarge = Error(ErrGRPCRequestTooLarge)
|
ErrRequestTooLarge = Error(ErrGRPCRequestTooLarge)
|
||||||
|
ErrTooManyRequests = Error(ErrGRPCRequestTooManyRequests)
|
||||||
|
|
||||||
ErrRootUserNotExist = Error(ErrGRPCRootUserNotExist)
|
ErrRootUserNotExist = Error(ErrGRPCRootUserNotExist)
|
||||||
ErrRootRoleNotExist = Error(ErrGRPCRootRoleNotExist)
|
ErrRootRoleNotExist = Error(ErrGRPCRootRoleNotExist)
|
||||||
ErrUserAlreadyExist = Error(ErrGRPCUserAlreadyExist)
|
ErrUserAlreadyExist = Error(ErrGRPCUserAlreadyExist)
|
||||||
|
ErrUserEmpty = Error(ErrGRPCUserEmpty)
|
||||||
ErrUserNotFound = Error(ErrGRPCUserNotFound)
|
ErrUserNotFound = Error(ErrGRPCUserNotFound)
|
||||||
ErrRoleAlreadyExist = Error(ErrGRPCRoleAlreadyExist)
|
ErrRoleAlreadyExist = Error(ErrGRPCRoleAlreadyExist)
|
||||||
ErrRoleNotFound = Error(ErrGRPCRoleNotFound)
|
ErrRoleNotFound = Error(ErrGRPCRoleNotFound)
|
||||||
@@ -123,6 +136,8 @@ var (
|
|||||||
ErrPermissionDenied = Error(ErrGRPCPermissionDenied)
|
ErrPermissionDenied = Error(ErrGRPCPermissionDenied)
|
||||||
ErrRoleNotGranted = Error(ErrGRPCRoleNotGranted)
|
ErrRoleNotGranted = Error(ErrGRPCRoleNotGranted)
|
||||||
ErrPermissionNotGranted = Error(ErrGRPCPermissionNotGranted)
|
ErrPermissionNotGranted = Error(ErrGRPCPermissionNotGranted)
|
||||||
|
ErrAuthNotEnabled = Error(ErrGRPCAuthNotEnabled)
|
||||||
|
ErrInvalidAuthToken = Error(ErrGRPCInvalidAuthToken)
|
||||||
|
|
||||||
ErrNoLeader = Error(ErrGRPCNoLeader)
|
ErrNoLeader = Error(ErrGRPCNoLeader)
|
||||||
ErrNotCapable = Error(ErrGRPCNotCapable)
|
ErrNotCapable = Error(ErrGRPCNotCapable)
|
||||||

30  vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/util.go  generated  vendored
@@ -18,6 +18,7 @@ import (
|
|||||||
"github.com/coreos/etcd/auth"
|
"github.com/coreos/etcd/auth"
|
||||||
"github.com/coreos/etcd/etcdserver"
|
"github.com/coreos/etcd/etcdserver"
|
||||||
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
|
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
|
||||||
|
"github.com/coreos/etcd/etcdserver/membership"
|
||||||
"github.com/coreos/etcd/lease"
|
"github.com/coreos/etcd/lease"
|
||||||
"github.com/coreos/etcd/mvcc"
|
"github.com/coreos/etcd/mvcc"
|
||||||
"google.golang.org/grpc"
|
"google.golang.org/grpc"
|
||||||
@@ -26,17 +27,29 @@ import (
|
|||||||
|
|
||||||
func togRPCError(err error) error {
|
func togRPCError(err error) error {
|
||||||
switch err {
|
switch err {
|
||||||
|
case membership.ErrIDRemoved:
|
||||||
|
return rpctypes.ErrGRPCMemberNotFound
|
||||||
|
case membership.ErrIDNotFound:
|
||||||
|
return rpctypes.ErrGRPCMemberNotFound
|
||||||
|
case membership.ErrIDExists:
|
||||||
|
return rpctypes.ErrGRPCMemberExist
|
||||||
|
case membership.ErrPeerURLexists:
|
||||||
|
return rpctypes.ErrGRPCPeerURLExist
|
||||||
|
case etcdserver.ErrNotEnoughStartedMembers:
|
||||||
|
return rpctypes.ErrMemberNotEnoughStarted
|
||||||
|
|
||||||
case mvcc.ErrCompacted:
|
case mvcc.ErrCompacted:
|
||||||
return rpctypes.ErrGRPCCompacted
|
return rpctypes.ErrGRPCCompacted
|
||||||
case mvcc.ErrFutureRev:
|
case mvcc.ErrFutureRev:
|
||||||
return rpctypes.ErrGRPCFutureRev
|
return rpctypes.ErrGRPCFutureRev
|
||||||
case lease.ErrLeaseNotFound:
|
case lease.ErrLeaseNotFound:
|
||||||
return rpctypes.ErrGRPCLeaseNotFound
|
return rpctypes.ErrGRPCLeaseNotFound
|
||||||
// TODO: handle error from raft and timeout
|
|
||||||
case etcdserver.ErrRequestTooLarge:
|
case etcdserver.ErrRequestTooLarge:
|
||||||
return rpctypes.ErrGRPCRequestTooLarge
|
return rpctypes.ErrGRPCRequestTooLarge
|
||||||
case etcdserver.ErrNoSpace:
|
case etcdserver.ErrNoSpace:
|
||||||
return rpctypes.ErrGRPCNoSpace
|
return rpctypes.ErrGRPCNoSpace
|
||||||
|
case etcdserver.ErrTooManyRequests:
|
||||||
|
return rpctypes.ErrTooManyRequests
|
||||||
|
|
||||||
case etcdserver.ErrNoLeader:
|
case etcdserver.ErrNoLeader:
|
||||||
return rpctypes.ErrGRPCNoLeader
|
return rpctypes.ErrGRPCNoLeader
|
||||||
@@ -48,6 +61,13 @@ func togRPCError(err error) error {
|
|||||||
return rpctypes.ErrGRPCTimeoutDueToLeaderFail
|
return rpctypes.ErrGRPCTimeoutDueToLeaderFail
|
||||||
case etcdserver.ErrTimeoutDueToConnectionLost:
|
case etcdserver.ErrTimeoutDueToConnectionLost:
|
||||||
return rpctypes.ErrGRPCTimeoutDueToConnectionLost
|
return rpctypes.ErrGRPCTimeoutDueToConnectionLost
|
||||||
|
case etcdserver.ErrUnhealthy:
|
||||||
|
return rpctypes.ErrGRPCUnhealthy
|
||||||
|
|
||||||
|
case lease.ErrLeaseNotFound:
|
||||||
|
return rpctypes.ErrGRPCLeaseNotFound
|
||||||
|
case lease.ErrLeaseExists:
|
||||||
|
return rpctypes.ErrGRPCLeaseExist
|
||||||
|
|
||||||
case auth.ErrRootUserNotExist:
|
case auth.ErrRootUserNotExist:
|
||||||
return rpctypes.ErrGRPCRootUserNotExist
|
return rpctypes.ErrGRPCRootUserNotExist
|
||||||
@@ -55,6 +75,8 @@ func togRPCError(err error) error {
|
|||||||
return rpctypes.ErrGRPCRootRoleNotExist
|
return rpctypes.ErrGRPCRootRoleNotExist
|
||||||
case auth.ErrUserAlreadyExist:
|
case auth.ErrUserAlreadyExist:
|
||||||
return rpctypes.ErrGRPCUserAlreadyExist
|
return rpctypes.ErrGRPCUserAlreadyExist
|
||||||
|
case auth.ErrUserEmpty:
|
||||||
|
return rpctypes.ErrGRPCUserEmpty
|
||||||
case auth.ErrUserNotFound:
|
case auth.ErrUserNotFound:
|
||||||
return rpctypes.ErrGRPCUserNotFound
|
return rpctypes.ErrGRPCUserNotFound
|
||||||
case auth.ErrRoleAlreadyExist:
|
case auth.ErrRoleAlreadyExist:
|
||||||
@@ -69,7 +91,11 @@ func togRPCError(err error) error {
|
|||||||
return rpctypes.ErrGRPCRoleNotGranted
|
return rpctypes.ErrGRPCRoleNotGranted
|
||||||
case auth.ErrPermissionNotGranted:
|
case auth.ErrPermissionNotGranted:
|
||||||
return rpctypes.ErrGRPCPermissionNotGranted
|
return rpctypes.ErrGRPCPermissionNotGranted
|
||||||
|
case auth.ErrAuthNotEnabled:
|
||||||
|
return rpctypes.ErrGRPCAuthNotEnabled
|
||||||
|
case auth.ErrInvalidAuthToken:
|
||||||
|
return rpctypes.ErrGRPCInvalidAuthToken
|
||||||
default:
|
default:
|
||||||
return grpc.Errorf(codes.Internal, err.Error())
|
return grpc.Errorf(codes.Unknown, err.Error())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
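Not part of the vendored code: togRPCError above now covers membership, lease, and auth errors and maps unknown failures to codes.Unknown instead of codes.Internal. The pattern in isolation, with hypothetical domain errors standing in for etcd's:

	package main

	import (
		"errors"
		"fmt"

		"google.golang.org/grpc"
		"google.golang.org/grpc/codes"
	)

	// Hypothetical domain errors, standing in for the membership/lease/auth
	// errors that togRPCError translates.
	var (
		errNotFound = errors.New("store: key not found")
		errTooBusy  = errors.New("store: too many in-flight requests")
	)

	// toStatusError mirrors the shape of togRPCError: one switch at the RPC
	// boundary, so internal packages never depend on gRPC status codes.
	func toStatusError(err error) error {
		switch err {
		case nil:
			return nil
		case errNotFound:
			return grpc.Errorf(codes.NotFound, err.Error())
		case errTooBusy:
			return grpc.Errorf(codes.ResourceExhausted, err.Error())
		default:
			return grpc.Errorf(codes.Unknown, err.Error())
		}
	}

	func main() {
		fmt.Println(toStatusError(errNotFound))
	}
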

36  vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/watch.go  generated  vendored
@@ -92,6 +92,7 @@ type serverWatchStream struct {
|
|||||||
mu sync.Mutex
|
mu sync.Mutex
|
||||||
// progress tracks the watchID that stream might need to send
|
// progress tracks the watchID that stream might need to send
|
||||||
// progress to.
|
// progress to.
|
||||||
|
// TODO: combine progress and prevKV into a single struct?
|
||||||
progress map[mvcc.WatchID]bool
|
progress map[mvcc.WatchID]bool
|
||||||
prevKV map[mvcc.WatchID]bool
|
prevKV map[mvcc.WatchID]bool
|
||||||
|
|
||||||
@@ -130,10 +131,14 @@ func (ws *watchServer) Watch(stream pb.Watch_WatchServer) (err error) {
|
|||||||
// but when stream.Context().Done() is closed, the stream's recv
|
// but when stream.Context().Done() is closed, the stream's recv
|
||||||
// may continue to block since it uses a different context, leading to
|
// may continue to block since it uses a different context, leading to
|
||||||
// deadlock when calling sws.close().
|
// deadlock when calling sws.close().
|
||||||
go func() { errc <- sws.recvLoop() }()
|
go func() {
|
||||||
|
if rerr := sws.recvLoop(); rerr != nil {
|
||||||
|
errc <- rerr
|
||||||
|
}
|
||||||
|
}()
|
||||||
select {
|
select {
|
||||||
case err = <-errc:
|
case err = <-errc:
|
||||||
|
close(sws.ctrlStream)
|
||||||
case <-stream.Context().Done():
|
case <-stream.Context().Done():
|
||||||
err = stream.Context().Err()
|
err = stream.Context().Err()
|
||||||
// the only server-side cancellation is noleader for now.
|
// the only server-side cancellation is noleader for now.
|
||||||
@@ -146,7 +151,6 @@ func (ws *watchServer) Watch(stream pb.Watch_WatchServer) (err error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (sws *serverWatchStream) recvLoop() error {
|
func (sws *serverWatchStream) recvLoop() error {
|
||||||
defer close(sws.ctrlStream)
|
|
||||||
for {
|
for {
|
||||||
req, err := sws.gRPCStream.Recv()
|
req, err := sws.gRPCStream.Recv()
|
||||||
if err == io.EOF {
|
if err == io.EOF {
|
||||||
@@ -171,12 +175,14 @@ func (sws *serverWatchStream) recvLoop() error {
|
|||||||
// support >= key queries
|
// support >= key queries
|
||||||
creq.RangeEnd = []byte{}
|
creq.RangeEnd = []byte{}
|
||||||
}
|
}
|
||||||
|
filters := FiltersFromRequest(creq)
|
||||||
|
|
||||||
wsrev := sws.watchStream.Rev()
|
wsrev := sws.watchStream.Rev()
|
||||||
rev := creq.StartRevision
|
rev := creq.StartRevision
|
||||||
if rev == 0 {
|
if rev == 0 {
|
||||||
rev = wsrev + 1
|
rev = wsrev + 1
|
||||||
}
|
}
|
||||||
id := sws.watchStream.Watch(creq.Key, creq.RangeEnd, rev)
|
id := sws.watchStream.Watch(creq.Key, creq.RangeEnd, rev, filters...)
|
||||||
if id != -1 {
|
if id != -1 {
|
||||||
sws.mu.Lock()
|
sws.mu.Lock()
|
||||||
if creq.ProgressNotify {
|
if creq.ProgressNotify {
|
||||||
@@ -353,3 +359,25 @@ func (sws *serverWatchStream) newResponseHeader(rev int64) *pb.ResponseHeader {
|
|||||||
RaftTerm: sws.raftTimer.Term(),
|
RaftTerm: sws.raftTimer.Term(),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func filterNoDelete(e mvccpb.Event) bool {
|
||||||
|
return e.Type == mvccpb.DELETE
|
||||||
|
}
|
||||||
|
|
||||||
|
func filterNoPut(e mvccpb.Event) bool {
|
||||||
|
return e.Type == mvccpb.PUT
|
||||||
|
}
|
||||||
|
|
||||||
|
func FiltersFromRequest(creq *pb.WatchCreateRequest) []mvcc.FilterFunc {
|
||||||
|
filters := make([]mvcc.FilterFunc, 0, len(creq.Filters))
|
||||||
|
for _, ft := range creq.Filters {
|
||||||
|
switch ft {
|
||||||
|
case pb.WatchCreateRequest_NOPUT:
|
||||||
|
filters = append(filters, filterNoPut)
|
||||||
|
case pb.WatchCreateRequest_NODELETE:
|
||||||
|
filters = append(filters, filterNoDelete)
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return filters
|
||||||
|
}
|
||||||
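For illustration, not from the diff: the NOPUT/NODELETE filters built by FiltersFromRequest above are what clientv3's WithFilterPut and WithFilterDelete watch options request (both are present in the 3.1 client, to the best of my knowledge). A hedged sketch of watching deletions only:

	package main

	import (
		"fmt"
		"time"

		"github.com/coreos/etcd/clientv3"
		"golang.org/x/net/context"
	)

	func main() {
		cli, err := clientv3.New(clientv3.Config{
			Endpoints:   []string{"127.0.0.1:2379"}, // assumption: default client URL
			DialTimeout: 5 * time.Second,
		})
		if err != nil {
			panic(err)
		}
		defer cli.Close()

		// WithFilterPut asks the server to drop PUT events, so only deletions arrive.
		wch := cli.Watch(context.Background(), "jobs/", clientv3.WithPrefix(), clientv3.WithFilterPut())
		for wresp := range wch {
			for _, ev := range wresp.Events {
				fmt.Println("deleted:", string(ev.Kv.Key))
			}
		}
	}
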

100  vendor/github.com/coreos/etcd/etcdserver/apply.go  generated  vendored
@@ -35,7 +35,7 @@ const (
|
|||||||
// to apply functions instead of a valid txn ID.
|
// to apply functions instead of a valid txn ID.
|
||||||
noTxn = -1
|
noTxn = -1
|
||||||
|
|
||||||
warnApplyDuration = 10 * time.Millisecond
|
warnApplyDuration = 100 * time.Millisecond
|
||||||
)
|
)
|
||||||
|
|
||||||
type applyResult struct {
|
type applyResult struct {
|
||||||
@@ -258,7 +258,9 @@ func (a *applierV3backend) Range(txnID int64, r *pb.RangeRequest) (*pb.RangeResp
|
|||||||
}
|
}
|
||||||
|
|
||||||
limit := r.Limit
|
limit := r.Limit
|
||||||
if r.SortOrder != pb.RangeRequest_NONE {
|
if r.SortOrder != pb.RangeRequest_NONE ||
|
||||||
|
r.MinModRevision != 0 || r.MaxModRevision != 0 ||
|
||||||
|
r.MinCreateRevision != 0 || r.MaxCreateRevision != 0 {
|
||||||
// fetch everything; sort and truncate afterwards
|
// fetch everything; sort and truncate afterwards
|
||||||
limit = 0
|
limit = 0
|
||||||
}
|
}
|
||||||
@@ -285,7 +287,31 @@ func (a *applierV3backend) Range(txnID int64, r *pb.RangeRequest) (*pb.RangeResp
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if r.SortOrder != pb.RangeRequest_NONE {
|
if r.MaxModRevision != 0 {
|
||||||
|
f := func(kv *mvccpb.KeyValue) bool { return kv.ModRevision > r.MaxModRevision }
|
||||||
|
pruneKVs(rr, f)
|
||||||
|
}
|
||||||
|
if r.MinModRevision != 0 {
|
||||||
|
f := func(kv *mvccpb.KeyValue) bool { return kv.ModRevision < r.MinModRevision }
|
||||||
|
pruneKVs(rr, f)
|
||||||
|
}
|
||||||
|
if r.MaxCreateRevision != 0 {
|
||||||
|
f := func(kv *mvccpb.KeyValue) bool { return kv.CreateRevision > r.MaxCreateRevision }
|
||||||
|
pruneKVs(rr, f)
|
||||||
|
}
|
||||||
|
if r.MinCreateRevision != 0 {
|
||||||
|
f := func(kv *mvccpb.KeyValue) bool { return kv.CreateRevision < r.MinCreateRevision }
|
||||||
|
pruneKVs(rr, f)
|
||||||
|
}
|
||||||
|
|
||||||
|
sortOrder := r.SortOrder
|
||||||
|
if r.SortTarget != pb.RangeRequest_KEY && sortOrder == pb.RangeRequest_NONE {
|
||||||
|
// Since current mvcc.Range implementation returns results
|
||||||
|
// sorted by keys in lexiographically ascending order,
|
||||||
|
// sort ASCEND by default only when target is not 'KEY'
|
||||||
|
sortOrder = pb.RangeRequest_ASCEND
|
||||||
|
}
|
||||||
|
if sortOrder != pb.RangeRequest_NONE {
|
||||||
var sorter sort.Interface
|
var sorter sort.Interface
|
||||||
switch {
|
switch {
|
||||||
case r.SortTarget == pb.RangeRequest_KEY:
|
case r.SortTarget == pb.RangeRequest_KEY:
|
||||||
@@ -300,9 +326,9 @@ func (a *applierV3backend) Range(txnID int64, r *pb.RangeRequest) (*pb.RangeResp
|
|||||||
sorter = &kvSortByValue{&kvSort{rr.KVs}}
|
sorter = &kvSortByValue{&kvSort{rr.KVs}}
|
||||||
}
|
}
|
||||||
switch {
|
switch {
|
||||||
case r.SortOrder == pb.RangeRequest_ASCEND:
|
case sortOrder == pb.RangeRequest_ASCEND:
|
||||||
sort.Sort(sorter)
|
sort.Sort(sorter)
|
||||||
case r.SortOrder == pb.RangeRequest_DESCEND:
|
case sortOrder == pb.RangeRequest_DESCEND:
|
||||||
sort.Sort(sort.Reverse(sorter))
|
sort.Sort(sort.Reverse(sorter))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -345,34 +371,23 @@ func (a *applierV3backend) Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error) {
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
revision := a.s.KV().Rev()
|
|
||||||
|
|
||||||
// When executing the operations of txn, we need to hold the txn lock.
|
// When executing the operations of txn, we need to hold the txn lock.
|
||||||
// So the reader will not see any intermediate results.
|
// So the reader will not see any intermediate results.
|
||||||
txnID := a.s.KV().TxnBegin()
|
txnID := a.s.KV().TxnBegin()
|
||||||
defer func() {
|
|
||||||
err := a.s.KV().TxnEnd(txnID)
|
|
||||||
if err != nil {
|
|
||||||
panic(fmt.Sprint("unexpected error when closing txn", txnID))
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
resps := make([]*pb.ResponseOp, len(reqs))
|
resps := make([]*pb.ResponseOp, len(reqs))
|
||||||
changedKV := false
|
|
||||||
for i := range reqs {
|
for i := range reqs {
|
||||||
if reqs[i].GetRequestRange() == nil {
|
|
||||||
changedKV = true
|
|
||||||
}
|
|
||||||
resps[i] = a.applyUnion(txnID, reqs[i])
|
resps[i] = a.applyUnion(txnID, reqs[i])
|
||||||
}
|
}
|
||||||
|
|
||||||
if changedKV {
|
err := a.s.KV().TxnEnd(txnID)
|
||||||
revision += 1
|
if err != nil {
|
||||||
|
panic(fmt.Sprint("unexpected error when closing txn", txnID))
|
||||||
}
|
}
|
||||||
|
|
||||||
txnResp := &pb.TxnResponse{}
|
txnResp := &pb.TxnResponse{}
|
||||||
txnResp.Header = &pb.ResponseHeader{}
|
txnResp.Header = &pb.ResponseHeader{}
|
||||||
txnResp.Header.Revision = revision
|
txnResp.Header.Revision = a.s.KV().Rev()
|
||||||
txnResp.Responses = resps
|
txnResp.Responses = resps
|
||||||
txnResp.Succeeded = ok
|
txnResp.Succeeded = ok
|
||||||
return txnResp, nil
|
return txnResp, nil
|
||||||
@@ -436,6 +451,10 @@ func (a *applierV3backend) applyCompare(c *pb.Compare) (int64, bool) {
|
|||||||
if result != 0 {
|
if result != 0 {
|
||||||
return rev, false
|
return rev, false
|
||||||
}
|
}
|
||||||
|
case pb.Compare_NOT_EQUAL:
|
||||||
|
if result == 0 {
|
||||||
|
return rev, false
|
||||||
|
}
|
||||||
case pb.Compare_GREATER:
|
case pb.Compare_GREATER:
|
||||||
if result != 1 {
|
if result != 1 {
|
||||||
return rev, false
|
return rev, false
|
||||||
@@ -454,7 +473,7 @@ func (a *applierV3backend) applyUnion(txnID int64, union *pb.RequestOp) *pb.Resp
 if tv.RequestRange != nil {
 resp, err := a.Range(txnID, tv.RequestRange)
 if err != nil {
-panic("unexpected error during txn")
+plog.Panicf("unexpected error during txn: %v", err)
 }
 return &pb.ResponseOp{Response: &pb.ResponseOp_ResponseRange{ResponseRange: resp}}
 }
@@ -462,7 +481,7 @@ func (a *applierV3backend) applyUnion(txnID int64, union *pb.RequestOp) *pb.Resp
 if tv.RequestPut != nil {
 resp, err := a.Put(txnID, tv.RequestPut)
 if err != nil {
-panic("unexpected error during txn")
+plog.Panicf("unexpected error during txn: %v", err)
 }
 return &pb.ResponseOp{Response: &pb.ResponseOp_ResponsePut{ResponsePut: resp}}
 }
@@ -470,7 +489,7 @@ func (a *applierV3backend) applyUnion(txnID int64, union *pb.RequestOp) *pb.Resp
 if tv.RequestDeleteRange != nil {
 resp, err := a.DeleteRange(txnID, tv.RequestDeleteRange)
 if err != nil {
-panic("unexpected error during txn")
+plog.Panicf("unexpected error during txn: %v", err)
 }
 return &pb.ResponseOp{Response: &pb.ResponseOp_ResponseDeleteRange{ResponseDeleteRange: resp}}
 }
@@ -500,7 +519,7 @@ func (a *applierV3backend) LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantR
 resp := &pb.LeaseGrantResponse{}
 if err == nil {
 resp.ID = int64(l.ID)
-resp.TTL = l.TTL
+resp.TTL = l.TTL()
 resp.Header = &pb.ResponseHeader{Revision: a.s.KV().Rev()}
 }

@@ -784,3 +803,36 @@ func compareInt64(a, b int64) int {
 func isGteRange(rangeEnd []byte) bool {
 return len(rangeEnd) == 1 && rangeEnd[0] == 0
 }

+func noSideEffect(r *pb.InternalRaftRequest) bool {
+return r.Range != nil || r.AuthUserGet != nil || r.AuthRoleGet != nil
+}
+
+func removeNeedlessRangeReqs(txn *pb.TxnRequest) {
+f := func(ops []*pb.RequestOp) []*pb.RequestOp {
+j := 0
+for i := 0; i < len(ops); i++ {
+if _, ok := ops[i].Request.(*pb.RequestOp_RequestRange); ok {
+continue
+}
+ops[j] = ops[i]
+j++
+}
+
+return ops[:j]
+}
+
+txn.Success = f(txn.Success)
+txn.Failure = f(txn.Failure)
+}
+
+func pruneKVs(rr *mvcc.RangeResult, isPrunable func(*mvccpb.KeyValue) bool) {
+j := 0
+for i := range rr.KVs {
+rr.KVs[j] = rr.KVs[i]
+if !isPrunable(&rr.KVs[i]) {
+j++
+}
+}
+rr.KVs = rr.KVs[:j]
+}
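The new pruneKVs and removeNeedlessRangeReqs helpers both use the same in-place compaction idiom: walk the slice with a write index j and re-slice to [:j] at the end, so no extra allocation is needed. A self-contained sketch of the idiom on a plain string slice:

package main

import "fmt"

// filterInPlace keeps only the elements for which keep returns true,
// reusing the backing array exactly like pruneKVs and
// removeNeedlessRangeReqs above.
func filterInPlace(xs []string, keep func(string) bool) []string {
    j := 0
    for i := range xs {
        if keep(xs[i]) {
            xs[j] = xs[i]
            j++
        }
    }
    return xs[:j]
}

func main() {
    ops := []string{"range", "put", "range", "delete"}
    writes := filterInPlace(ops, func(op string) bool { return op != "range" })
    fmt.Println(writes) // [put delete]
}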
100 vendor/github.com/coreos/etcd/etcdserver/apply_auth.go generated vendored
@@ -27,8 +27,9 @@ type authApplierV3 struct {

 // mu serializes Apply so that user isn't corrupted and so that
 // serialized requests don't leak data from TOCTOU errors
 mu sync.Mutex
-user string
+authInfo auth.AuthInfo
 }

 func newAuthApplierV3(as auth.AuthStore, base applierV3) *authApplierV3 {
@@ -41,45 +42,57 @@ func (aa *authApplierV3) Apply(r *pb.InternalRaftRequest) *applyResult {
 if r.Header != nil {
 // backward-compatible with pre-3.0 releases when internalRaftRequest
 // does not have header field
-aa.user = r.Header.Username
+aa.authInfo.Username = r.Header.Username
+aa.authInfo.Revision = r.Header.AuthRevision
 }
-if needAdminPermission(r) && !aa.as.IsAdminPermitted(aa.user) {
-aa.user = ""
-return &applyResult{err: auth.ErrPermissionDenied}
+if needAdminPermission(r) {
+if err := aa.as.IsAdminPermitted(&aa.authInfo); err != nil {
+aa.authInfo.Username = ""
+aa.authInfo.Revision = 0
+return &applyResult{err: err}
+}
 }
 ret := aa.applierV3.Apply(r)
-aa.user = ""
+aa.authInfo.Username = ""
+aa.authInfo.Revision = 0
 return ret
 }

 func (aa *authApplierV3) Put(txnID int64, r *pb.PutRequest) (*pb.PutResponse, error) {
-if !aa.as.IsPutPermitted(aa.user, r.Key) {
-return nil, auth.ErrPermissionDenied
+if err := aa.as.IsPutPermitted(&aa.authInfo, r.Key); err != nil {
+return nil, err
 }
-if r.PrevKv && !aa.as.IsRangePermitted(aa.user, r.Key, nil) {
-return nil, auth.ErrPermissionDenied
+if r.PrevKv {
+err := aa.as.IsRangePermitted(&aa.authInfo, r.Key, nil)
+if err != nil {
+return nil, err
+}
 }
 return aa.applierV3.Put(txnID, r)
 }

 func (aa *authApplierV3) Range(txnID int64, r *pb.RangeRequest) (*pb.RangeResponse, error) {
-if !aa.as.IsRangePermitted(aa.user, r.Key, r.RangeEnd) {
-return nil, auth.ErrPermissionDenied
+if err := aa.as.IsRangePermitted(&aa.authInfo, r.Key, r.RangeEnd); err != nil {
+return nil, err
 }
 return aa.applierV3.Range(txnID, r)
 }

 func (aa *authApplierV3) DeleteRange(txnID int64, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) {
-if !aa.as.IsDeleteRangePermitted(aa.user, r.Key, r.RangeEnd) {
-return nil, auth.ErrPermissionDenied
+if err := aa.as.IsDeleteRangePermitted(&aa.authInfo, r.Key, r.RangeEnd); err != nil {
+return nil, err
 }
-if r.PrevKv && !aa.as.IsRangePermitted(aa.user, r.Key, r.RangeEnd) {
-return nil, auth.ErrPermissionDenied
+if r.PrevKv {
+err := aa.as.IsRangePermitted(&aa.authInfo, r.Key, r.RangeEnd)
+if err != nil {
+return nil, err
+}
 }

 return aa.applierV3.DeleteRange(txnID, r)
 }

-func (aa *authApplierV3) checkTxnReqsPermission(reqs []*pb.RequestOp) bool {
+func checkTxnReqsPermission(as auth.AuthStore, ai *auth.AuthInfo, reqs []*pb.RequestOp) error {
 for _, requ := range reqs {
 switch tv := requ.Request.(type) {
 case *pb.RequestOp_RequestRange:
@@ -87,8 +100,8 @@ func (aa *authApplierV3) checkTxnReqsPermission(reqs []*pb.RequestOp) bool {
 continue
 }

-if !aa.as.IsRangePermitted(aa.user, tv.RequestRange.Key, tv.RequestRange.RangeEnd) {
-return false
+if err := as.IsRangePermitted(ai, tv.RequestRange.Key, tv.RequestRange.RangeEnd); err != nil {
+return err
 }

 case *pb.RequestOp_RequestPut:
@@ -96,8 +109,8 @@ func (aa *authApplierV3) checkTxnReqsPermission(reqs []*pb.RequestOp) bool {
 continue
 }

-if !aa.as.IsPutPermitted(aa.user, tv.RequestPut.Key) {
-return false
+if err := as.IsPutPermitted(ai, tv.RequestPut.Key); err != nil {
+return err
 }

 case *pb.RequestOp_RequestDeleteRange:
@@ -105,29 +118,42 @@ func (aa *authApplierV3) checkTxnReqsPermission(reqs []*pb.RequestOp) bool {
 continue
 }

-if tv.RequestDeleteRange.PrevKv && !aa.as.IsRangePermitted(aa.user, tv.RequestDeleteRange.Key, tv.RequestDeleteRange.RangeEnd) {
-return false
+if tv.RequestDeleteRange.PrevKv {
+err := as.IsRangePermitted(ai, tv.RequestDeleteRange.Key, tv.RequestDeleteRange.RangeEnd)
+if err != nil {
+return err
+}
+}
+
+err := as.IsDeleteRangePermitted(ai, tv.RequestDeleteRange.Key, tv.RequestDeleteRange.RangeEnd)
+if err != nil {
+return err
 }
 }
 }

-return true
+return nil
+}
+
+func checkTxnAuth(as auth.AuthStore, ai *auth.AuthInfo, rt *pb.TxnRequest) error {
+for _, c := range rt.Compare {
+if err := as.IsRangePermitted(ai, c.Key, nil); err != nil {
+return err
+}
+}
+if err := checkTxnReqsPermission(as, ai, rt.Success); err != nil {
+return err
+}
+if err := checkTxnReqsPermission(as, ai, rt.Failure); err != nil {
+return err
+}
+return nil
 }

 func (aa *authApplierV3) Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error) {
-for _, c := range rt.Compare {
-if !aa.as.IsRangePermitted(aa.user, c.Key, nil) {
-return nil, auth.ErrPermissionDenied
-}
-}
+if err := checkTxnAuth(aa.as, &aa.authInfo, rt); err != nil {
+return nil, err
+}

-if !aa.checkTxnReqsPermission(rt.Success) {
-return nil, auth.ErrPermissionDenied
-}
-if !aa.checkTxnReqsPermission(rt.Failure) {
-return nil, auth.ErrPermissionDenied
-}

 return aa.applierV3.Txn(rt)
 }
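In apply_auth.go the permission helpers now return an error instead of a bool, and per-request checks are factored into checkTxnReqsPermission/checkTxnAuth so the first failing check aborts the whole transaction with a concrete error. A cut-down sketch of that shape (the interface below is a stand-in, not the real auth.AuthStore):

package main

import (
    "errors"
    "fmt"
    "strings"
)

var errPermissionDenied = errors.New("auth: permission denied")

// permChecker is a stand-in for the error-returning methods that the real
// auth store exposes (IsRangePermitted, IsPutPermitted, ...).
type permChecker interface {
    rangePermitted(user, key string) error
    putPermitted(user, key string) error
}

type op struct {
    kind string // "range" or "put"
    key  string
}

// checkTxnOps mirrors the shape of checkTxnReqsPermission: the first
// failing check aborts the whole transaction with its error.
func checkTxnOps(pc permChecker, user string, ops []op) error {
    for _, o := range ops {
        var err error
        switch o.kind {
        case "range":
            err = pc.rangePermitted(user, o.key)
        case "put":
            err = pc.putPermitted(user, o.key)
        }
        if err != nil {
            return err
        }
    }
    return nil
}

// allowPrefix grants every operation under a single key prefix.
type allowPrefix struct{ prefix string }

func (a allowPrefix) rangePermitted(user, key string) error { return a.check(key) }
func (a allowPrefix) putPermitted(user, key string) error   { return a.check(key) }
func (a allowPrefix) check(key string) error {
    if strings.HasPrefix(key, a.prefix) {
        return nil
    }
    return errPermissionDenied
}

func main() {
    pc := allowPrefix{prefix: "/app/"}
    err := checkTxnOps(pc, "alice", []op{{"range", "/app/a"}, {"put", "/etc/passwd"}})
    fmt.Println(err) // auth: permission denied
}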
96 vendor/github.com/coreos/etcd/etcdserver/auth/auth.go generated vendored
@@ -46,7 +46,7 @@ const (
 )

 var (
-plog = capnslog.NewPackageLogger("github.com/coreos/etcd/etcdserver", "auth")
+plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver/auth")
 )

 var rootRole = Role{
@@ -167,7 +167,7 @@ func (_ passwordStore) HashPassword(password string) (string, error) {
 }

 func (s *store) AllUsers() ([]string, error) {
-resp, err := s.requestResource("/users/", false)
+resp, err := s.requestResource("/users/", false, false)
 if err != nil {
 if e, ok := err.(*etcderr.Error); ok {
 if e.ErrorCode == etcderr.EcodeKeyNotFound {
@@ -185,33 +185,13 @@ func (s *store) AllUsers() ([]string, error) {
 return nodes, nil
 }

-func (s *store) GetUser(name string) (User, error) {
-resp, err := s.requestResource("/users/"+name, false)
-if err != nil {
-if e, ok := err.(*etcderr.Error); ok {
-if e.ErrorCode == etcderr.EcodeKeyNotFound {
-return User{}, authErr(http.StatusNotFound, "User %s does not exist.", name)
-}
-}
-return User{}, err
-}
-var u User
-err = json.Unmarshal([]byte(*resp.Event.Node.Value), &u)
-if err != nil {
-return u, err
-}
-// Attach root role to root user.
-if u.User == "root" {
-u = attachRootRole(u)
-}
-return u, nil
-}
+func (s *store) GetUser(name string) (User, error) { return s.getUser(name, false) }

 // CreateOrUpdateUser should be only used for creating the new user or when you are not
 // sure if it is a create or update. (When only password is passed in, we are not sure
 // if it is a update or create)
 func (s *store) CreateOrUpdateUser(user User) (out User, created bool, err error) {
-_, err = s.GetUser(user.User)
+_, err = s.getUser(user.User, true)
 if err == nil {
 out, err = s.UpdateUser(user)
 return out, false, err
@@ -271,7 +251,7 @@ func (s *store) DeleteUser(name string) error {
 }

 func (s *store) UpdateUser(user User) (User, error) {
-old, err := s.GetUser(user.User)
+old, err := s.getUser(user.User, true)
 if err != nil {
 if e, ok := err.(*etcderr.Error); ok {
 if e.ErrorCode == etcderr.EcodeKeyNotFound {
@@ -297,7 +277,7 @@ func (s *store) UpdateUser(user User) (User, error) {

 func (s *store) AllRoles() ([]string, error) {
 nodes := []string{RootRoleName}
-resp, err := s.requestResource("/roles/", false)
+resp, err := s.requestResource("/roles/", false, false)
 if err != nil {
 if e, ok := err.(*etcderr.Error); ok {
 if e.ErrorCode == etcderr.EcodeKeyNotFound {
@@ -314,23 +294,7 @@ func (s *store) AllRoles() ([]string, error) {
 return nodes, nil
 }

-func (s *store) GetRole(name string) (Role, error) {
-if name == RootRoleName {
-return rootRole, nil
-}
-resp, err := s.requestResource("/roles/"+name, false)
-if err != nil {
-if e, ok := err.(*etcderr.Error); ok {
-if e.ErrorCode == etcderr.EcodeKeyNotFound {
-return Role{}, authErr(http.StatusNotFound, "Role %s does not exist.", name)
-}
-}
-return Role{}, err
-}
-var r Role
-err = json.Unmarshal([]byte(*resp.Event.Node.Value), &r)
-return r, err
-}
+func (s *store) GetRole(name string) (Role, error) { return s.getRole(name, false) }

 func (s *store) CreateRole(role Role) error {
 if role.Role == RootRoleName {
@@ -372,7 +336,7 @@ func (s *store) UpdateRole(role Role) (Role, error) {
 if role.Role == RootRoleName {
 return Role{}, authErr(http.StatusForbidden, "Cannot modify role %s: is root role.", role.Role)
 }
-old, err := s.GetRole(role.Role)
+old, err := s.getRole(role.Role, true)
 if err != nil {
 if e, ok := err.(*etcderr.Error); ok {
 if e.ErrorCode == etcderr.EcodeKeyNotFound {
@@ -404,10 +368,10 @@ func (s *store) EnableAuth() error {
 return authErr(http.StatusConflict, "already enabled")
 }

-if _, err := s.GetUser("root"); err != nil {
+if _, err := s.getUser("root", true); err != nil {
 return authErr(http.StatusConflict, "No root user available, please create one")
 }
-if _, err := s.GetRole(GuestRoleName); err != nil {
+if _, err := s.getRole(GuestRoleName, true); err != nil {
 plog.Printf("no guest role access found, creating default")
 if err := s.CreateRole(guestRole); err != nil {
 plog.Errorf("error creating guest role. aborting auth enable.")
@@ -641,3 +605,43 @@ func attachRootRole(u User) User {
 }
 return u
 }

+func (s *store) getUser(name string, quorum bool) (User, error) {
+resp, err := s.requestResource("/users/"+name, false, quorum)
+if err != nil {
+if e, ok := err.(*etcderr.Error); ok {
+if e.ErrorCode == etcderr.EcodeKeyNotFound {
+return User{}, authErr(http.StatusNotFound, "User %s does not exist.", name)
+}
+}
+return User{}, err
+}
+var u User
+err = json.Unmarshal([]byte(*resp.Event.Node.Value), &u)
+if err != nil {
+return u, err
+}
+// Attach root role to root user.
+if u.User == "root" {
+u = attachRootRole(u)
+}
+return u, nil
+}
+
+func (s *store) getRole(name string, quorum bool) (Role, error) {
+if name == RootRoleName {
+return rootRole, nil
+}
+resp, err := s.requestResource("/roles/"+name, false, quorum)
+if err != nil {
+if e, ok := err.(*etcderr.Error); ok {
+if e.ErrorCode == etcderr.EcodeKeyNotFound {
+return Role{}, authErr(http.StatusNotFound, "Role %s does not exist.", name)
+}
+}
+return Role{}, err
+}
+var r Role
+err = json.Unmarshal([]byte(*resp.Event.Node.Value), &r)
+return r, err
+}
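auth.go splits user/role lookups into getUser/getRole with a quorum flag: plain GetUser/GetRole stay as cheap local reads, while call sites that gate a mutation (CreateOrUpdateUser, UpdateUser, EnableAuth) pass quorum=true so they never act on a stale follower view. A hypothetical sketch of that split (the store below is a toy, not the etcd v2 auth store):

package main

import (
    "errors"
    "fmt"
)

var errNotFound = errors.New("auth: user does not exist")

// userStore is a toy stand-in: local models what a follower may see,
// quorum models the committed, linearizable view.
type userStore struct {
    local  map[string]bool
    quorum map[string]bool
}

func (s *userStore) getUser(name string, quorum bool) error {
    src := s.local
    if quorum {
        // Reads that gate a mutation go through quorum so they cannot
        // act on a stale view of the store.
        src = s.quorum
    }
    if !src[name] {
        return errNotFound
    }
    return nil
}

func main() {
    s := &userStore{
        local:  map[string]bool{},             // stale follower view
        quorum: map[string]bool{"root": true}, // committed state
    }
    fmt.Println(s.getUser("root", false)) // auth: user does not exist
    fmt.Println(s.getUser("root", true))  // <nil>
}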
10 vendor/github.com/coreos/etcd/etcdserver/auth/auth_requests.go generated vendored
@@ -85,7 +85,7 @@ func (s *store) detectAuth() bool {
 if s.server == nil {
 return false
 }
-value, err := s.requestResource("/enabled", false)
+value, err := s.requestResource("/enabled", false, false)
 if err != nil {
 if e, ok := err.(*etcderr.Error); ok {
 if e.ErrorCode == etcderr.EcodeKeyNotFound {
@@ -105,12 +105,16 @@ func (s *store) detectAuth() bool {
 return u
 }

-func (s *store) requestResource(res string, dir bool) (etcdserver.Response, error) {
+func (s *store) requestResource(res string, dir, quorum bool) (etcdserver.Response, error) {
 ctx, cancel := context.WithTimeout(context.Background(), s.timeout)
 defer cancel()
 p := path.Join(StorePermsPrefix, res)
+method := "GET"
+if quorum {
+method = "QGET"
+}
 rr := etcdserverpb.Request{
-Method: "GET",
+Method: method,
 Path: p,
 Dir: dir,
 }
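The requestResource change above is where the quorum flag actually takes effect: a quorum read is issued as a v2 QGET, which etcd treats as a consistent (quorum) read rather than a possibly stale local one. The selection itself is just a method-string switch, sketched here in isolation:

package main

import "fmt"

// methodFor mirrors the small switch added to requestResource: quorum
// reads are sent as "QGET" so the v2 store answers them consistently
// instead of from possibly stale local state.
func methodFor(quorum bool) string {
    if quorum {
        return "QGET"
    }
    return "GET"
}

func main() {
    fmt.Println(methodFor(false)) // GET
    fmt.Println(methodFor(true))  // QGET
}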
11 vendor/github.com/coreos/etcd/etcdserver/cluster_util.go generated vendored
@@ -94,7 +94,16 @@ func getClusterFromRemotePeers(urls []string, timeout time.Duration, logerr bool
 }
 continue
 }
-return membership.NewClusterFromMembers("", id, membs), nil
+// check the length of membership members
+// if the membership members are present then prepare and return raft cluster
+// if membership members are not present then the raft cluster formed will be
+// an invalid empty cluster hence return failed to get raft cluster member(s) from the given urls error
+if len(membs) > 0 {
+return membership.NewClusterFromMembers("", id, membs), nil
+}
+
+return nil, fmt.Errorf("failed to get raft cluster member(s) from the given urls.")
 }
 return nil, fmt.Errorf("could not retrieve cluster information from the given urls")
 }
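The cluster_util.go hunk guards against constructing a cluster from an empty member list: only a non-empty membs slice yields a usable cluster, otherwise the caller gets an explicit error. A small sketch of the same guard with a hypothetical constructor standing in for membership.NewClusterFromMembers:

package main

import (
    "errors"
    "fmt"
)

type member struct{ name string }

// newClusterFromMembers is a hypothetical stand-in for
// membership.NewClusterFromMembers; the point is the length guard.
func newClusterFromMembers(membs []member) (string, error) {
    if len(membs) == 0 {
        // Without the guard an empty member list would produce a useless,
        // empty cluster object instead of a clear error.
        return "", errors.New("failed to get raft cluster member(s) from the given urls")
    }
    return fmt.Sprintf("cluster with %d member(s)", len(membs)), nil
}

func main() {
    if _, err := newClusterFromMembers(nil); err != nil {
        fmt.Println(err)
    }
    c, _ := newClusterFromMembers([]member{{"infra0"}, {"infra1"}})
    fmt.Println(c) // cluster with 2 member(s)
}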
59 vendor/github.com/coreos/etcd/etcdserver/config.go generated vendored
@@ -16,11 +16,13 @@ package etcdserver

 import (
 "fmt"
-"path"
+"path/filepath"
 "sort"
 "strings"
 "time"

+"golang.org/x/net/context"
+
 "github.com/coreos/etcd/pkg/netutil"
 "github.com/coreos/etcd/pkg/transport"
 "github.com/coreos/etcd/pkg/types"
@@ -55,8 +57,6 @@ type ServerConfig struct {

 StrictReconfigCheck bool

-EnablePprof bool
-
 // ClientCertAuthEnabled is true when cert has been signed by the client CA.
 ClientCertAuthEnabled bool
 }
@@ -64,7 +64,10 @@ type ServerConfig struct {
 // VerifyBootstrap sanity-checks the initial config for bootstrap case
 // and returns an error for things that should never happen.
 func (c *ServerConfig) VerifyBootstrap() error {
-if err := c.verifyLocalMember(true); err != nil {
+if err := c.hasLocalMember(); err != nil {
+return err
+}
+if err := c.advertiseMatchesCluster(); err != nil {
 return err
 }
 if checkDuplicateURL(c.InitialPeerURLsMap) {
@@ -79,10 +82,9 @@ func (c *ServerConfig) VerifyBootstrap() error {
 // VerifyJoinExisting sanity-checks the initial config for join existing cluster
 // case and returns an error for things that should never happen.
 func (c *ServerConfig) VerifyJoinExisting() error {
-// no need for strict checking since the member have announced its
-// peer urls to the cluster before starting and do not have to set
-// it in the configuration again.
-if err := c.verifyLocalMember(false); err != nil {
+// The member has announced its peer urls to the cluster before starting; no need to
+// set the configuration again.
+if err := c.hasLocalMember(); err != nil {
 return err
 }
 if checkDuplicateURL(c.InitialPeerURLsMap) {
@@ -94,39 +96,38 @@ func (c *ServerConfig) VerifyJoinExisting() error {
 return nil
 }

-// verifyLocalMember verifies the configured member is in configured
-// cluster. If strict is set, it also verifies the configured member
-// has the same peer urls as configured advertised peer urls.
-func (c *ServerConfig) verifyLocalMember(strict bool) error {
-urls := c.InitialPeerURLsMap[c.Name]
-// Make sure the cluster at least contains the local server.
-if urls == nil {
+// hasLocalMember checks that the cluster at least contains the local server.
+func (c *ServerConfig) hasLocalMember() error {
+if urls := c.InitialPeerURLsMap[c.Name]; urls == nil {
 return fmt.Errorf("couldn't find local name %q in the initial cluster configuration", c.Name)
 }

-// Advertised peer URLs must match those in the cluster peer list
-apurls := c.PeerURLs.StringSlice()
-sort.Strings(apurls)
-urls.Sort()
-if strict {
-if !netutil.URLStringsEqual(apurls, urls.StringSlice()) {
-umap := map[string]types.URLs{c.Name: c.PeerURLs}
-return fmt.Errorf("--initial-cluster must include %s given --initial-advertise-peer-urls=%s", types.URLsMap(umap).String(), strings.Join(apurls, ","))
-}
-}
 return nil
 }

-func (c *ServerConfig) MemberDir() string { return path.Join(c.DataDir, "member") }
+// advertiseMatchesCluster confirms peer URLs match those in the cluster peer list.
+func (c *ServerConfig) advertiseMatchesCluster() error {
+urls, apurls := c.InitialPeerURLsMap[c.Name], c.PeerURLs.StringSlice()
+urls.Sort()
+sort.Strings(apurls)
+ctx, cancel := context.WithTimeout(context.TODO(), 30*time.Second)
+defer cancel()
+if !netutil.URLStringsEqual(ctx, apurls, urls.StringSlice()) {
+umap := map[string]types.URLs{c.Name: c.PeerURLs}
+return fmt.Errorf("--initial-cluster must include %s given --initial-advertise-peer-urls=%s", types.URLsMap(umap).String(), strings.Join(apurls, ","))
+}
+return nil
+}
+
+func (c *ServerConfig) MemberDir() string { return filepath.Join(c.DataDir, "member") }

 func (c *ServerConfig) WALDir() string {
 if c.DedicatedWALDir != "" {
 return c.DedicatedWALDir
 }
-return path.Join(c.MemberDir(), "wal")
+return filepath.Join(c.MemberDir(), "wal")
 }

-func (c *ServerConfig) SnapDir() string { return path.Join(c.MemberDir(), "snap") }
+func (c *ServerConfig) SnapDir() string { return filepath.Join(c.MemberDir(), "snap") }

 func (c *ServerConfig) ShouldDiscover() bool { return c.DiscoveryURL != "" }
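config.go switches the data-dir helpers from path.Join to filepath.Join. path.Join always produces slash-separated paths, while filepath.Join uses the host operating system's separator, which is what you want for on-disk locations such as the member, wal, and snap directories. The difference is easy to see:

package main

import (
    "fmt"
    "path"
    "path/filepath"
)

func main() {
    // path.Join is for slash-separated paths (URLs, store keys) ...
    fmt.Println(path.Join("data", "member", "wal")) // data/member/wal on every OS

    // ... filepath.Join is for filesystem paths and uses the OS separator,
    // e.g. data\member\wal on Windows.
    fmt.Println(filepath.Join("data", "member", "wal"))
}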
4 vendor/github.com/coreos/etcd/etcdserver/errors.go generated vendored
@@ -26,11 +26,13 @@ var (
 ErrTimeout = errors.New("etcdserver: request timed out")
 ErrTimeoutDueToLeaderFail = errors.New("etcdserver: request timed out, possibly due to previous leader failure")
 ErrTimeoutDueToConnectionLost = errors.New("etcdserver: request timed out, possibly due to connection lost")
+ErrTimeoutLeaderTransfer = errors.New("etcdserver: request timed out, leader transfer took too long")
 ErrNotEnoughStartedMembers = errors.New("etcdserver: re-configuration failed due to not enough started members")
 ErrNoLeader = errors.New("etcdserver: no leader")
 ErrRequestTooLarge = errors.New("etcdserver: request is too large")
 ErrNoSpace = errors.New("etcdserver: no space")
-ErrInvalidAuthToken = errors.New("etcdserver: invalid auth token")
+ErrTooManyRequests = errors.New("etcdserver: too many requests")
+ErrUnhealthy = errors.New("etcdserver: unhealthy cluster")
 )

 type DiscoveryError struct {
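errors.go gains ErrTimeoutLeaderTransfer, ErrTooManyRequests, and ErrUnhealthy as package-level sentinel values. Callers typically consume such sentinels by plain equality, as in this sketch (local copies of the values, not the real etcdserver package):

package main

import (
    "errors"
    "fmt"
)

// Local stand-ins for the sentinel values defined in the hunk above.
var (
    errTooManyRequests = errors.New("etcdserver: too many requests")
    errUnhealthy       = errors.New("etcdserver: unhealthy cluster")
)

// retryable shows the usual way sentinel errors are consumed: compare
// against the exported variables and decide how to react.
func retryable(err error) bool {
    switch err {
    case errTooManyRequests:
        return true // back off and retry later
    case errUnhealthy:
        return false // the cluster itself needs attention first
    default:
        return false
    }
}

func main() {
    fmt.Println(retryable(errTooManyRequests)) // true
    fmt.Println(retryable(errUnhealthy))       // false
}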
340 vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.pb.go generated vendored
@@ -45,6 +45,8 @@
 LeaseRevokeResponse
 LeaseKeepAliveRequest
 LeaseKeepAliveResponse
+LeaseTimeToLiveRequest
+LeaseTimeToLiveResponse
 Member
 MemberAddRequest
 MemberAddResponse
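The remaining etcdserver.pb.go changes are generated-code churn from a newer gogo/protobuf release: the marshaling buffer is renamed from data to dAtA, the struct tags drop their json= aliases, and the compile-time assertion moves to ProtoPackageIsVersion2. Callers keep using the same generated Marshal/Unmarshal pair; a sketch of that round trip against the vendored package (field names taken from the Request struct shown further down):

package main

import (
    "fmt"

    pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
)

func main() {
    req := &pb.Request{ID: 42, Method: "PUT", Path: "/foo", Val: "bar"}

    // Marshal and Unmarshal are the generated methods whose bodies are
    // being regenerated in the hunks that follow.
    buf, err := req.Marshal()
    if err != nil {
        panic(err)
    }

    var out pb.Request
    if err := out.Unmarshal(buf); err != nil {
        panic(err)
    }
    fmt.Println(out.ID, out.Method, out.Path, out.Val) // 42 PUT /foo bar
}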
@@ -113,26 +115,28 @@ var _ = math.Inf
|
|||||||
|
|
||||||
// This is a compile-time assertion to ensure that this generated file
|
// This is a compile-time assertion to ensure that this generated file
|
||||||
// is compatible with the proto package it is being compiled against.
|
// is compatible with the proto package it is being compiled against.
|
||||||
const _ = proto.ProtoPackageIsVersion1
|
// A compilation error at this line likely means your copy of the
|
||||||
|
// proto package needs to be updated.
|
||||||
|
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
|
||||||
|
|
||||||
type Request struct {
|
type Request struct {
|
||||||
ID uint64 `protobuf:"varint,1,opt,name=ID,json=iD" json:"ID"`
|
ID uint64 `protobuf:"varint,1,opt,name=ID" json:"ID"`
|
||||||
Method string `protobuf:"bytes,2,opt,name=Method,json=method" json:"Method"`
|
Method string `protobuf:"bytes,2,opt,name=Method" json:"Method"`
|
||||||
Path string `protobuf:"bytes,3,opt,name=Path,json=path" json:"Path"`
|
Path string `protobuf:"bytes,3,opt,name=Path" json:"Path"`
|
||||||
Val string `protobuf:"bytes,4,opt,name=Val,json=val" json:"Val"`
|
Val string `protobuf:"bytes,4,opt,name=Val" json:"Val"`
|
||||||
Dir bool `protobuf:"varint,5,opt,name=Dir,json=dir" json:"Dir"`
|
Dir bool `protobuf:"varint,5,opt,name=Dir" json:"Dir"`
|
||||||
PrevValue string `protobuf:"bytes,6,opt,name=PrevValue,json=prevValue" json:"PrevValue"`
|
PrevValue string `protobuf:"bytes,6,opt,name=PrevValue" json:"PrevValue"`
|
||||||
PrevIndex uint64 `protobuf:"varint,7,opt,name=PrevIndex,json=prevIndex" json:"PrevIndex"`
|
PrevIndex uint64 `protobuf:"varint,7,opt,name=PrevIndex" json:"PrevIndex"`
|
||||||
PrevExist *bool `protobuf:"varint,8,opt,name=PrevExist,json=prevExist" json:"PrevExist,omitempty"`
|
PrevExist *bool `protobuf:"varint,8,opt,name=PrevExist" json:"PrevExist,omitempty"`
|
||||||
Expiration int64 `protobuf:"varint,9,opt,name=Expiration,json=expiration" json:"Expiration"`
|
Expiration int64 `protobuf:"varint,9,opt,name=Expiration" json:"Expiration"`
|
||||||
Wait bool `protobuf:"varint,10,opt,name=Wait,json=wait" json:"Wait"`
|
Wait bool `protobuf:"varint,10,opt,name=Wait" json:"Wait"`
|
||||||
Since uint64 `protobuf:"varint,11,opt,name=Since,json=since" json:"Since"`
|
Since uint64 `protobuf:"varint,11,opt,name=Since" json:"Since"`
|
||||||
Recursive bool `protobuf:"varint,12,opt,name=Recursive,json=recursive" json:"Recursive"`
|
Recursive bool `protobuf:"varint,12,opt,name=Recursive" json:"Recursive"`
|
||||||
Sorted bool `protobuf:"varint,13,opt,name=Sorted,json=sorted" json:"Sorted"`
|
Sorted bool `protobuf:"varint,13,opt,name=Sorted" json:"Sorted"`
|
||||||
Quorum bool `protobuf:"varint,14,opt,name=Quorum,json=quorum" json:"Quorum"`
|
Quorum bool `protobuf:"varint,14,opt,name=Quorum" json:"Quorum"`
|
||||||
Time int64 `protobuf:"varint,15,opt,name=Time,json=time" json:"Time"`
|
Time int64 `protobuf:"varint,15,opt,name=Time" json:"Time"`
|
||||||
Stream bool `protobuf:"varint,16,opt,name=Stream,json=stream" json:"Stream"`
|
Stream bool `protobuf:"varint,16,opt,name=Stream" json:"Stream"`
|
||||||
Refresh *bool `protobuf:"varint,17,opt,name=Refresh,json=refresh" json:"Refresh,omitempty"`
|
Refresh *bool `protobuf:"varint,17,opt,name=Refresh" json:"Refresh,omitempty"`
|
||||||
XXX_unrecognized []byte `json:"-"`
|
XXX_unrecognized []byte `json:"-"`
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -142,8 +146,8 @@ func (*Request) ProtoMessage() {}
|
|||||||
func (*Request) Descriptor() ([]byte, []int) { return fileDescriptorEtcdserver, []int{0} }
|
func (*Request) Descriptor() ([]byte, []int) { return fileDescriptorEtcdserver, []int{0} }
|
||||||
|
|
||||||
type Metadata struct {
|
type Metadata struct {
|
||||||
NodeID uint64 `protobuf:"varint,1,opt,name=NodeID,json=nodeID" json:"NodeID"`
|
NodeID uint64 `protobuf:"varint,1,opt,name=NodeID" json:"NodeID"`
|
||||||
ClusterID uint64 `protobuf:"varint,2,opt,name=ClusterID,json=clusterID" json:"ClusterID"`
|
ClusterID uint64 `protobuf:"varint,2,opt,name=ClusterID" json:"ClusterID"`
|
||||||
XXX_unrecognized []byte `json:"-"`
|
XXX_unrecognized []byte `json:"-"`
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -156,182 +160,182 @@ func init() {
|
|||||||
proto.RegisterType((*Request)(nil), "etcdserverpb.Request")
|
proto.RegisterType((*Request)(nil), "etcdserverpb.Request")
|
||||||
proto.RegisterType((*Metadata)(nil), "etcdserverpb.Metadata")
|
proto.RegisterType((*Metadata)(nil), "etcdserverpb.Metadata")
|
||||||
}
|
}
|
||||||
func (m *Request) Marshal() (data []byte, err error) {
|
func (m *Request) Marshal() (dAtA []byte, err error) {
|
||||||
size := m.Size()
|
size := m.Size()
|
||||||
data = make([]byte, size)
|
dAtA = make([]byte, size)
|
||||||
n, err := m.MarshalTo(data)
|
n, err := m.MarshalTo(dAtA)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
return data[:n], nil
|
return dAtA[:n], nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *Request) MarshalTo(data []byte) (int, error) {
|
func (m *Request) MarshalTo(dAtA []byte) (int, error) {
|
||||||
var i int
|
var i int
|
||||||
_ = i
|
_ = i
|
||||||
var l int
|
var l int
|
||||||
_ = l
|
_ = l
|
||||||
data[i] = 0x8
|
dAtA[i] = 0x8
|
||||||
i++
|
i++
|
||||||
i = encodeVarintEtcdserver(data, i, uint64(m.ID))
|
i = encodeVarintEtcdserver(dAtA, i, uint64(m.ID))
|
||||||
data[i] = 0x12
|
dAtA[i] = 0x12
|
||||||
i++
|
i++
|
||||||
i = encodeVarintEtcdserver(data, i, uint64(len(m.Method)))
|
i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.Method)))
|
||||||
i += copy(data[i:], m.Method)
|
i += copy(dAtA[i:], m.Method)
|
||||||
data[i] = 0x1a
|
dAtA[i] = 0x1a
|
||||||
i++
|
i++
|
||||||
i = encodeVarintEtcdserver(data, i, uint64(len(m.Path)))
|
i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.Path)))
|
||||||
i += copy(data[i:], m.Path)
|
i += copy(dAtA[i:], m.Path)
|
||||||
data[i] = 0x22
|
dAtA[i] = 0x22
|
||||||
i++
|
i++
|
||||||
i = encodeVarintEtcdserver(data, i, uint64(len(m.Val)))
|
i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.Val)))
|
||||||
i += copy(data[i:], m.Val)
|
i += copy(dAtA[i:], m.Val)
|
||||||
data[i] = 0x28
|
dAtA[i] = 0x28
|
||||||
i++
|
i++
|
||||||
if m.Dir {
|
if m.Dir {
|
||||||
data[i] = 1
|
dAtA[i] = 1
|
||||||
} else {
|
} else {
|
||||||
data[i] = 0
|
dAtA[i] = 0
|
||||||
}
|
}
|
||||||
i++
|
i++
|
||||||
data[i] = 0x32
|
dAtA[i] = 0x32
|
||||||
i++
|
i++
|
||||||
i = encodeVarintEtcdserver(data, i, uint64(len(m.PrevValue)))
|
i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.PrevValue)))
|
||||||
i += copy(data[i:], m.PrevValue)
|
i += copy(dAtA[i:], m.PrevValue)
|
||||||
data[i] = 0x38
|
dAtA[i] = 0x38
|
||||||
i++
|
i++
|
||||||
i = encodeVarintEtcdserver(data, i, uint64(m.PrevIndex))
|
i = encodeVarintEtcdserver(dAtA, i, uint64(m.PrevIndex))
|
||||||
if m.PrevExist != nil {
|
if m.PrevExist != nil {
|
||||||
data[i] = 0x40
|
dAtA[i] = 0x40
|
||||||
i++
|
i++
|
||||||
if *m.PrevExist {
|
if *m.PrevExist {
|
||||||
data[i] = 1
|
dAtA[i] = 1
|
||||||
} else {
|
} else {
|
||||||
data[i] = 0
|
dAtA[i] = 0
|
||||||
}
|
}
|
||||||
i++
|
i++
|
||||||
}
|
}
|
||||||
data[i] = 0x48
|
dAtA[i] = 0x48
|
||||||
i++
|
i++
|
||||||
i = encodeVarintEtcdserver(data, i, uint64(m.Expiration))
|
i = encodeVarintEtcdserver(dAtA, i, uint64(m.Expiration))
|
||||||
data[i] = 0x50
|
dAtA[i] = 0x50
|
||||||
i++
|
i++
|
||||||
if m.Wait {
|
if m.Wait {
|
||||||
data[i] = 1
|
dAtA[i] = 1
|
||||||
} else {
|
} else {
|
||||||
data[i] = 0
|
dAtA[i] = 0
|
||||||
}
|
}
|
||||||
i++
|
i++
|
||||||
data[i] = 0x58
|
dAtA[i] = 0x58
|
||||||
i++
|
i++
|
||||||
i = encodeVarintEtcdserver(data, i, uint64(m.Since))
|
i = encodeVarintEtcdserver(dAtA, i, uint64(m.Since))
|
||||||
data[i] = 0x60
|
dAtA[i] = 0x60
|
||||||
i++
|
i++
|
||||||
if m.Recursive {
|
if m.Recursive {
|
||||||
data[i] = 1
|
dAtA[i] = 1
|
||||||
} else {
|
} else {
|
||||||
data[i] = 0
|
dAtA[i] = 0
|
||||||
}
|
}
|
||||||
i++
|
i++
|
||||||
data[i] = 0x68
|
dAtA[i] = 0x68
|
||||||
i++
|
i++
|
||||||
if m.Sorted {
|
if m.Sorted {
|
||||||
data[i] = 1
|
dAtA[i] = 1
|
||||||
} else {
|
} else {
|
||||||
data[i] = 0
|
dAtA[i] = 0
|
||||||
}
|
}
|
||||||
i++
|
i++
|
||||||
data[i] = 0x70
|
dAtA[i] = 0x70
|
||||||
i++
|
i++
|
||||||
if m.Quorum {
|
if m.Quorum {
|
||||||
data[i] = 1
|
dAtA[i] = 1
|
||||||
} else {
|
} else {
|
||||||
data[i] = 0
|
dAtA[i] = 0
|
||||||
}
|
}
|
||||||
i++
|
i++
|
||||||
data[i] = 0x78
|
dAtA[i] = 0x78
|
||||||
i++
|
i++
|
||||||
i = encodeVarintEtcdserver(data, i, uint64(m.Time))
|
i = encodeVarintEtcdserver(dAtA, i, uint64(m.Time))
|
||||||
data[i] = 0x80
|
dAtA[i] = 0x80
|
||||||
i++
|
i++
|
||||||
data[i] = 0x1
|
dAtA[i] = 0x1
|
||||||
i++
|
i++
|
||||||
if m.Stream {
|
if m.Stream {
|
||||||
data[i] = 1
|
dAtA[i] = 1
|
||||||
} else {
|
} else {
|
||||||
data[i] = 0
|
dAtA[i] = 0
|
||||||
}
|
}
|
||||||
i++
|
i++
|
||||||
if m.Refresh != nil {
|
if m.Refresh != nil {
|
||||||
data[i] = 0x88
|
dAtA[i] = 0x88
|
||||||
i++
|
i++
|
||||||
data[i] = 0x1
|
dAtA[i] = 0x1
|
||||||
i++
|
i++
|
||||||
if *m.Refresh {
|
if *m.Refresh {
|
||||||
data[i] = 1
|
dAtA[i] = 1
|
||||||
} else {
|
} else {
|
||||||
data[i] = 0
|
dAtA[i] = 0
|
||||||
}
|
}
|
||||||
i++
|
i++
|
||||||
}
|
}
|
||||||
if m.XXX_unrecognized != nil {
|
if m.XXX_unrecognized != nil {
|
||||||
i += copy(data[i:], m.XXX_unrecognized)
|
i += copy(dAtA[i:], m.XXX_unrecognized)
|
||||||
}
|
}
|
||||||
return i, nil
|
return i, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *Metadata) Marshal() (data []byte, err error) {
|
func (m *Metadata) Marshal() (dAtA []byte, err error) {
|
||||||
size := m.Size()
|
size := m.Size()
|
||||||
data = make([]byte, size)
|
dAtA = make([]byte, size)
|
||||||
n, err := m.MarshalTo(data)
|
n, err := m.MarshalTo(dAtA)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
return data[:n], nil
|
return dAtA[:n], nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *Metadata) MarshalTo(data []byte) (int, error) {
|
func (m *Metadata) MarshalTo(dAtA []byte) (int, error) {
|
||||||
var i int
|
var i int
|
||||||
_ = i
|
_ = i
|
||||||
var l int
|
var l int
|
||||||
_ = l
|
_ = l
|
||||||
data[i] = 0x8
|
dAtA[i] = 0x8
|
||||||
i++
|
i++
|
||||||
i = encodeVarintEtcdserver(data, i, uint64(m.NodeID))
|
i = encodeVarintEtcdserver(dAtA, i, uint64(m.NodeID))
|
||||||
data[i] = 0x10
|
dAtA[i] = 0x10
|
||||||
i++
|
i++
|
||||||
i = encodeVarintEtcdserver(data, i, uint64(m.ClusterID))
|
i = encodeVarintEtcdserver(dAtA, i, uint64(m.ClusterID))
|
||||||
if m.XXX_unrecognized != nil {
|
if m.XXX_unrecognized != nil {
|
||||||
i += copy(data[i:], m.XXX_unrecognized)
|
i += copy(dAtA[i:], m.XXX_unrecognized)
|
||||||
}
|
}
|
||||||
return i, nil
|
return i, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func encodeFixed64Etcdserver(data []byte, offset int, v uint64) int {
|
func encodeFixed64Etcdserver(dAtA []byte, offset int, v uint64) int {
|
||||||
data[offset] = uint8(v)
|
dAtA[offset] = uint8(v)
|
||||||
data[offset+1] = uint8(v >> 8)
|
dAtA[offset+1] = uint8(v >> 8)
|
||||||
data[offset+2] = uint8(v >> 16)
|
dAtA[offset+2] = uint8(v >> 16)
|
||||||
data[offset+3] = uint8(v >> 24)
|
dAtA[offset+3] = uint8(v >> 24)
|
||||||
data[offset+4] = uint8(v >> 32)
|
dAtA[offset+4] = uint8(v >> 32)
|
||||||
data[offset+5] = uint8(v >> 40)
|
dAtA[offset+5] = uint8(v >> 40)
|
||||||
data[offset+6] = uint8(v >> 48)
|
dAtA[offset+6] = uint8(v >> 48)
|
||||||
data[offset+7] = uint8(v >> 56)
|
dAtA[offset+7] = uint8(v >> 56)
|
||||||
return offset + 8
|
return offset + 8
|
||||||
}
|
}
|
||||||
func encodeFixed32Etcdserver(data []byte, offset int, v uint32) int {
|
func encodeFixed32Etcdserver(dAtA []byte, offset int, v uint32) int {
|
||||||
data[offset] = uint8(v)
|
dAtA[offset] = uint8(v)
|
||||||
data[offset+1] = uint8(v >> 8)
|
dAtA[offset+1] = uint8(v >> 8)
|
||||||
data[offset+2] = uint8(v >> 16)
|
dAtA[offset+2] = uint8(v >> 16)
|
||||||
data[offset+3] = uint8(v >> 24)
|
dAtA[offset+3] = uint8(v >> 24)
|
||||||
return offset + 4
|
return offset + 4
|
||||||
}
|
}
|
||||||
func encodeVarintEtcdserver(data []byte, offset int, v uint64) int {
|
func encodeVarintEtcdserver(dAtA []byte, offset int, v uint64) int {
|
||||||
for v >= 1<<7 {
|
for v >= 1<<7 {
|
||||||
data[offset] = uint8(v&0x7f | 0x80)
|
dAtA[offset] = uint8(v&0x7f | 0x80)
|
||||||
v >>= 7
|
v >>= 7
|
||||||
offset++
|
offset++
|
||||||
}
|
}
|
||||||
data[offset] = uint8(v)
|
dAtA[offset] = uint8(v)
|
||||||
return offset + 1
|
return offset + 1
|
||||||
}
|
}
|
||||||
func (m *Request) Size() (n int) {
|
func (m *Request) Size() (n int) {
|
||||||
@@ -392,8 +396,8 @@ func sovEtcdserver(x uint64) (n int) {
|
|||||||
func sozEtcdserver(x uint64) (n int) {
|
func sozEtcdserver(x uint64) (n int) {
|
||||||
return sovEtcdserver(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
return sovEtcdserver(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
||||||
}
|
}
|
||||||
func (m *Request) Unmarshal(data []byte) error {
|
func (m *Request) Unmarshal(dAtA []byte) error {
|
||||||
l := len(data)
|
l := len(dAtA)
|
||||||
iNdEx := 0
|
iNdEx := 0
|
||||||
for iNdEx < l {
|
for iNdEx < l {
|
||||||
preIndex := iNdEx
|
preIndex := iNdEx
|
||||||
@@ -405,7 +409,7 @@ func (m *Request) Unmarshal(data []byte) error {
|
|||||||
if iNdEx >= l {
|
if iNdEx >= l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
b := data[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
wire |= (uint64(b) & 0x7F) << shift
|
wire |= (uint64(b) & 0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
@@ -433,7 +437,7 @@ func (m *Request) Unmarshal(data []byte) error {
|
|||||||
if iNdEx >= l {
|
if iNdEx >= l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
b := data[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
m.ID |= (uint64(b) & 0x7F) << shift
|
m.ID |= (uint64(b) & 0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
@@ -452,7 +456,7 @@ func (m *Request) Unmarshal(data []byte) error {
|
|||||||
if iNdEx >= l {
|
if iNdEx >= l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
b := data[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
stringLen |= (uint64(b) & 0x7F) << shift
|
stringLen |= (uint64(b) & 0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
@@ -467,7 +471,7 @@ func (m *Request) Unmarshal(data []byte) error {
|
|||||||
if postIndex > l {
|
if postIndex > l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
m.Method = string(data[iNdEx:postIndex])
|
m.Method = string(dAtA[iNdEx:postIndex])
|
||||||
iNdEx = postIndex
|
iNdEx = postIndex
|
||||||
case 3:
|
case 3:
|
||||||
if wireType != 2 {
|
if wireType != 2 {
|
||||||
@@ -481,7 +485,7 @@ func (m *Request) Unmarshal(data []byte) error {
|
|||||||
if iNdEx >= l {
|
if iNdEx >= l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
b := data[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
stringLen |= (uint64(b) & 0x7F) << shift
|
stringLen |= (uint64(b) & 0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
@@ -496,7 +500,7 @@ func (m *Request) Unmarshal(data []byte) error {
|
|||||||
if postIndex > l {
|
if postIndex > l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
m.Path = string(data[iNdEx:postIndex])
|
m.Path = string(dAtA[iNdEx:postIndex])
|
||||||
iNdEx = postIndex
|
iNdEx = postIndex
|
||||||
case 4:
|
case 4:
|
||||||
if wireType != 2 {
|
if wireType != 2 {
|
||||||
@@ -510,7 +514,7 @@ func (m *Request) Unmarshal(data []byte) error {
|
|||||||
if iNdEx >= l {
|
if iNdEx >= l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
b := data[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
stringLen |= (uint64(b) & 0x7F) << shift
|
stringLen |= (uint64(b) & 0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
@@ -525,7 +529,7 @@ func (m *Request) Unmarshal(data []byte) error {
|
|||||||
if postIndex > l {
|
if postIndex > l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
m.Val = string(data[iNdEx:postIndex])
|
m.Val = string(dAtA[iNdEx:postIndex])
|
||||||
iNdEx = postIndex
|
iNdEx = postIndex
|
||||||
case 5:
|
case 5:
|
||||||
if wireType != 0 {
|
if wireType != 0 {
|
||||||
@@ -539,7 +543,7 @@ func (m *Request) Unmarshal(data []byte) error {
|
|||||||
if iNdEx >= l {
|
if iNdEx >= l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
b := data[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
v |= (int(b) & 0x7F) << shift
|
v |= (int(b) & 0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
@@ -559,7 +563,7 @@ func (m *Request) Unmarshal(data []byte) error {
|
|||||||
if iNdEx >= l {
|
if iNdEx >= l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
b := data[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
stringLen |= (uint64(b) & 0x7F) << shift
|
stringLen |= (uint64(b) & 0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
@@ -574,7 +578,7 @@ func (m *Request) Unmarshal(data []byte) error {
|
|||||||
if postIndex > l {
|
if postIndex > l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
m.PrevValue = string(data[iNdEx:postIndex])
|
m.PrevValue = string(dAtA[iNdEx:postIndex])
|
||||||
iNdEx = postIndex
|
iNdEx = postIndex
|
||||||
case 7:
|
case 7:
|
||||||
if wireType != 0 {
|
if wireType != 0 {
|
||||||
@@ -588,7 +592,7 @@ func (m *Request) Unmarshal(data []byte) error {
|
|||||||
if iNdEx >= l {
|
if iNdEx >= l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
b := data[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
m.PrevIndex |= (uint64(b) & 0x7F) << shift
|
m.PrevIndex |= (uint64(b) & 0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
@@ -607,7 +611,7 @@ func (m *Request) Unmarshal(data []byte) error {
|
|||||||
if iNdEx >= l {
|
if iNdEx >= l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
b := data[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
v |= (int(b) & 0x7F) << shift
|
v |= (int(b) & 0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
@@ -628,7 +632,7 @@ func (m *Request) Unmarshal(data []byte) error {
|
|||||||
if iNdEx >= l {
|
if iNdEx >= l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
b := data[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
m.Expiration |= (int64(b) & 0x7F) << shift
|
m.Expiration |= (int64(b) & 0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
@@ -647,7 +651,7 @@ func (m *Request) Unmarshal(data []byte) error {
|
|||||||
if iNdEx >= l {
|
if iNdEx >= l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
b := data[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
v |= (int(b) & 0x7F) << shift
|
v |= (int(b) & 0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
@@ -667,7 +671,7 @@ func (m *Request) Unmarshal(data []byte) error {
|
|||||||
if iNdEx >= l {
|
if iNdEx >= l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
b := data[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
m.Since |= (uint64(b) & 0x7F) << shift
|
m.Since |= (uint64(b) & 0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
@@ -686,7 +690,7 @@ func (m *Request) Unmarshal(data []byte) error {
|
|||||||
if iNdEx >= l {
|
if iNdEx >= l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
b := data[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
v |= (int(b) & 0x7F) << shift
|
v |= (int(b) & 0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
@@ -706,7 +710,7 @@ func (m *Request) Unmarshal(data []byte) error {
|
|||||||
if iNdEx >= l {
|
if iNdEx >= l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
b := data[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
v |= (int(b) & 0x7F) << shift
|
v |= (int(b) & 0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
@@ -726,7 +730,7 @@ func (m *Request) Unmarshal(data []byte) error {
|
|||||||
if iNdEx >= l {
|
if iNdEx >= l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
b := data[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
v |= (int(b) & 0x7F) << shift
|
v |= (int(b) & 0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
@@ -746,7 +750,7 @@ func (m *Request) Unmarshal(data []byte) error {
|
|||||||
if iNdEx >= l {
|
if iNdEx >= l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
b := data[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
m.Time |= (int64(b) & 0x7F) << shift
|
m.Time |= (int64(b) & 0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
@@ -765,7 +769,7 @@ func (m *Request) Unmarshal(data []byte) error {
|
|||||||
if iNdEx >= l {
|
if iNdEx >= l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
b := data[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
v |= (int(b) & 0x7F) << shift
|
v |= (int(b) & 0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
@@ -785,7 +789,7 @@ func (m *Request) Unmarshal(data []byte) error {
|
|||||||
if iNdEx >= l {
|
if iNdEx >= l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
b := data[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
v |= (int(b) & 0x7F) << shift
|
v |= (int(b) & 0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
@@ -796,7 +800,7 @@ func (m *Request) Unmarshal(data []byte) error {
|
|||||||
m.Refresh = &b
|
m.Refresh = &b
|
||||||
default:
|
default:
|
||||||
iNdEx = preIndex
|
iNdEx = preIndex
|
||||||
skippy, err := skipEtcdserver(data[iNdEx:])
|
skippy, err := skipEtcdserver(dAtA[iNdEx:])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -806,7 +810,7 @@ func (m *Request) Unmarshal(data []byte) error {
|
|||||||
if (iNdEx + skippy) > l {
|
if (iNdEx + skippy) > l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...)
|
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
|
||||||
iNdEx += skippy
|
iNdEx += skippy
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -816,8 +820,8 @@ func (m *Request) Unmarshal(data []byte) error {
|
|||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
func (m *Metadata) Unmarshal(data []byte) error {
|
func (m *Metadata) Unmarshal(dAtA []byte) error {
|
||||||
l := len(data)
|
l := len(dAtA)
|
||||||
iNdEx := 0
|
iNdEx := 0
|
||||||
for iNdEx < l {
|
for iNdEx < l {
|
||||||
preIndex := iNdEx
|
preIndex := iNdEx
|
||||||
@@ -829,7 +833,7 @@ func (m *Metadata) Unmarshal(data []byte) error {
|
|||||||
if iNdEx >= l {
|
if iNdEx >= l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
b := data[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
wire |= (uint64(b) & 0x7F) << shift
|
wire |= (uint64(b) & 0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
@@ -857,7 +861,7 @@ func (m *Metadata) Unmarshal(data []byte) error {
|
|||||||
if iNdEx >= l {
|
if iNdEx >= l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
b := data[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
m.NodeID |= (uint64(b) & 0x7F) << shift
|
m.NodeID |= (uint64(b) & 0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
@@ -876,7 +880,7 @@ func (m *Metadata) Unmarshal(data []byte) error {
|
|||||||
if iNdEx >= l {
|
if iNdEx >= l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
b := data[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
m.ClusterID |= (uint64(b) & 0x7F) << shift
|
m.ClusterID |= (uint64(b) & 0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
@@ -885,7 +889,7 @@ func (m *Metadata) Unmarshal(data []byte) error {
|
|||||||
}
|
}
|
||||||
default:
|
default:
|
||||||
iNdEx = preIndex
|
iNdEx = preIndex
|
||||||
skippy, err := skipEtcdserver(data[iNdEx:])
|
skippy, err := skipEtcdserver(dAtA[iNdEx:])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -895,7 +899,7 @@ func (m *Metadata) Unmarshal(data []byte) error {
|
|||||||
if (iNdEx + skippy) > l {
|
if (iNdEx + skippy) > l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...)
|
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
|
||||||
iNdEx += skippy
|
iNdEx += skippy
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -905,8 +909,8 @@ func (m *Metadata) Unmarshal(data []byte) error {
|
|||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
func skipEtcdserver(data []byte) (n int, err error) {
|
func skipEtcdserver(dAtA []byte) (n int, err error) {
|
||||||
l := len(data)
|
l := len(dAtA)
|
||||||
iNdEx := 0
|
iNdEx := 0
|
||||||
for iNdEx < l {
|
for iNdEx < l {
|
||||||
var wire uint64
|
var wire uint64
|
||||||
@@ -917,7 +921,7 @@ func skipEtcdserver(data []byte) (n int, err error) {
|
|||||||
if iNdEx >= l {
|
if iNdEx >= l {
|
||||||
return 0, io.ErrUnexpectedEOF
|
return 0, io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
b := data[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
wire |= (uint64(b) & 0x7F) << shift
|
wire |= (uint64(b) & 0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
@@ -935,7 +939,7 @@ func skipEtcdserver(data []byte) (n int, err error) {
|
|||||||
return 0, io.ErrUnexpectedEOF
|
return 0, io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
iNdEx++
|
iNdEx++
|
||||||
if data[iNdEx-1] < 0x80 {
|
if dAtA[iNdEx-1] < 0x80 {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -952,7 +956,7 @@ func skipEtcdserver(data []byte) (n int, err error) {
 				if iNdEx >= l {
 					return 0, io.ErrUnexpectedEOF
 				}
-				b := data[iNdEx]
+				b := dAtA[iNdEx]
 				iNdEx++
 				length |= (int(b) & 0x7F) << shift
 				if b < 0x80 {
@@ -975,7 +979,7 @@ func skipEtcdserver(data []byte) (n int, err error) {
 				if iNdEx >= l {
 					return 0, io.ErrUnexpectedEOF
 				}
-				b := data[iNdEx]
+				b := dAtA[iNdEx]
 				iNdEx++
 				innerWire |= (uint64(b) & 0x7F) << shift
 				if b < 0x80 {
@@ -986,7 +990,7 @@ func skipEtcdserver(data []byte) (n int, err error) {
 				if innerWireType == 4 {
 					break
 				}
-				next, err := skipEtcdserver(data[start:])
+				next, err := skipEtcdserver(dAtA[start:])
 				if err != nil {
 					return 0, err
 				}
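The hunks in skipEtcdserver apply the same rename to the generated helper that walks past a field Unmarshal does not recognize, so the raw bytes can be appended to XXX_unrecognized. Below is a rough, self-contained sketch of what such a skip helper does, keyed on the wire type carried in the field tag; the names and the example are ours, group wire types (3 and 4) are left out, and encoding/binary's Uvarint stands in for the generated varint loop.

package main

import (
	"encoding/binary"
	"fmt"
)

// skipField reports how many bytes an unknown field occupies, starting at its
// tag, so a caller can copy those bytes aside and keep decoding.
func skipField(dAtA []byte) (int, error) {
	tag, n := binary.Uvarint(dAtA) // tag = field number << 3 | wire type
	if n <= 0 {
		return 0, fmt.Errorf("bad tag")
	}
	switch tag & 0x7 {
	case 0: // varint payload: skip one more varint
		_, m := binary.Uvarint(dAtA[n:])
		if m <= 0 {
			return 0, fmt.Errorf("bad varint payload")
		}
		return n + m, nil
	case 1: // fixed64
		return n + 8, nil
	case 2: // length-delimited: a varint length, then that many bytes
		length, m := binary.Uvarint(dAtA[n:])
		if m <= 0 {
			return 0, fmt.Errorf("bad length")
		}
		return n + m + int(length), nil
	case 5: // fixed32
		return n + 4, nil
	default:
		return 0, fmt.Errorf("unsupported wire type %d", tag&0x7)
	}
}

func main() {
	// Field 2, wire type 2 (length-delimited), 3-byte payload "abc":
	// tag = 2<<3|2 = 0x12, length = 0x03, so the field spans 5 bytes.
	buf := []byte{0x12, 0x03, 'a', 'b', 'c', 0xFF}
	n, err := skipField(buf)
	fmt.Println(n, err) // prints: 5 <nil>
}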
@@ -1010,32 +1014,32 @@ var (
 	ErrIntOverflowEtcdserver = fmt.Errorf("proto: integer overflow")
 )
 
+func init() { proto.RegisterFile("etcdserver.proto", fileDescriptorEtcdserver) }
+
 var fileDescriptorEtcdserver = []byte{
-	// 404 bytes of a gzipped FileDescriptorProto
+	// 380 bytes of a gzipped FileDescriptorProto
 	[remainder of the hunk: the 26 old rows of gzipped descriptor bytes (0x1f, 0x8b, 0x08, ...) are replaced by 24 regenerated rows; the raw byte values are omitted here]
 }
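The final hunk regenerates the gzipped file descriptor (404 bytes down to 380) and adds an init function that registers it under the name etcdserver.proto. A small sketch of how that registration could be inspected at run time follows; it assumes the vendored github.com/gogo/protobuf/proto package (the one the generated file imports) exposes FileDescriptor for lookups, and the generated-package import path is shown only for illustration.

package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io/ioutil"

	"github.com/gogo/protobuf/proto"
	// Blank-import the generated package so its init() runs and calls
	// proto.RegisterFile("etcdserver.proto", fileDescriptorEtcdserver).
	_ "github.com/coreos/etcd/etcdserver/etcdserverpb"
)

func main() {
	// Look up the gzipped FileDescriptorProto registered above; its length
	// should match the "// 380 bytes ..." comment in the generated file.
	gz := proto.FileDescriptor("etcdserver.proto")
	fmt.Println("gzipped descriptor:", len(gz), "bytes")

	zr, err := gzip.NewReader(bytes.NewReader(gz))
	if err != nil {
		panic(err)
	}
	raw, err := ioutil.ReadAll(zr)
	if err != nil {
		panic(err)
	}
	fmt.Println("uncompressed FileDescriptorProto:", len(raw), "bytes")
}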
Some files were not shown because too many files have changed in this diff.