Merge remote-tracking branch 'upstream/master' into recycler_template_fix

This commit is contained in:
childsb
2016-02-10 14:47:55 -06:00
1156 changed files with 126452 additions and 81721 deletions

View File

@@ -44,6 +44,9 @@ docs/man/man1/kubectl-proxy.1
docs/man/man1/kubectl-replace.1 docs/man/man1/kubectl-replace.1
docs/man/man1/kubectl-rolling-update.1 docs/man/man1/kubectl-rolling-update.1
docs/man/man1/kubectl-rollout-history.1 docs/man/man1/kubectl-rollout-history.1
docs/man/man1/kubectl-rollout-pause.1
docs/man/man1/kubectl-rollout-resume.1
docs/man/man1/kubectl-rollout-undo.1
docs/man/man1/kubectl-rollout.1 docs/man/man1/kubectl-rollout.1
docs/man/man1/kubectl-run.1 docs/man/man1/kubectl-run.1
docs/man/man1/kubectl-scale.1 docs/man/man1/kubectl-scale.1
@@ -92,6 +95,9 @@ docs/user-guide/kubectl/kubectl_replace.md
docs/user-guide/kubectl/kubectl_rolling-update.md docs/user-guide/kubectl/kubectl_rolling-update.md
docs/user-guide/kubectl/kubectl_rollout.md docs/user-guide/kubectl/kubectl_rollout.md
docs/user-guide/kubectl/kubectl_rollout_history.md docs/user-guide/kubectl/kubectl_rollout_history.md
docs/user-guide/kubectl/kubectl_rollout_pause.md
docs/user-guide/kubectl/kubectl_rollout_resume.md
docs/user-guide/kubectl/kubectl_rollout_undo.md
docs/user-guide/kubectl/kubectl_run.md docs/user-guide/kubectl/kubectl_run.md
docs/user-guide/kubectl/kubectl_scale.md docs/user-guide/kubectl/kubectl_scale.md
docs/user-guide/kubectl/kubectl_uncordon.md docs/user-guide/kubectl/kubectl_uncordon.md

293
Godeps/Godeps.json generated
View File

@@ -132,12 +132,8 @@
}, },
{ {
"ImportPath": "github.com/boltdb/bolt", "ImportPath": "github.com/boltdb/bolt",
"Comment": "v1.0-119-g90fef38", "Comment": "v1.1.0-65-gee4a088",
"Rev": "90fef389f98027ca55594edd7dbd6e7f3926fdad" "Rev": "ee4a0888a9abe7eefe5a0992ca4cb06864839873"
},
{
"ImportPath": "github.com/bradfitz/http2",
"Rev": "3e36af6d3af0e56fa3da71099f864933dea3d9fb"
}, },
{ {
"ImportPath": "github.com/camlistore/go4/errorutil", "ImportPath": "github.com/camlistore/go4/errorutil",
@@ -150,123 +146,123 @@
}, },
{ {
"ImportPath": "github.com/coreos/etcd/client", "ImportPath": "github.com/coreos/etcd/client",
"Comment": "v2.2.2-4-ge0c7768", "Comment": "v2.2.5",
"Rev": "e0c7768f94cdc268b2fce31ada1dea823f11f505" "Rev": "bc9ddf260115d2680191c46977ae72b837785472"
}, },
{ {
"ImportPath": "github.com/coreos/etcd/discovery", "ImportPath": "github.com/coreos/etcd/discovery",
"Comment": "v2.2.2-4-ge0c7768", "Comment": "v2.2.5",
"Rev": "e0c7768f94cdc268b2fce31ada1dea823f11f505" "Rev": "bc9ddf260115d2680191c46977ae72b837785472"
}, },
{ {
"ImportPath": "github.com/coreos/etcd/error", "ImportPath": "github.com/coreos/etcd/error",
"Comment": "v2.2.2-4-ge0c7768", "Comment": "v2.2.5",
"Rev": "e0c7768f94cdc268b2fce31ada1dea823f11f505" "Rev": "bc9ddf260115d2680191c46977ae72b837785472"
}, },
{ {
"ImportPath": "github.com/coreos/etcd/etcdserver", "ImportPath": "github.com/coreos/etcd/etcdserver",
"Comment": "v2.2.2-4-ge0c7768", "Comment": "v2.2.5",
"Rev": "e0c7768f94cdc268b2fce31ada1dea823f11f505" "Rev": "bc9ddf260115d2680191c46977ae72b837785472"
}, },
{ {
"ImportPath": "github.com/coreos/etcd/pkg/crc", "ImportPath": "github.com/coreos/etcd/pkg/crc",
"Comment": "v2.2.2-4-ge0c7768", "Comment": "v2.2.5",
"Rev": "e0c7768f94cdc268b2fce31ada1dea823f11f505" "Rev": "bc9ddf260115d2680191c46977ae72b837785472"
}, },
{ {
"ImportPath": "github.com/coreos/etcd/pkg/fileutil", "ImportPath": "github.com/coreos/etcd/pkg/fileutil",
"Comment": "v2.2.2-4-ge0c7768", "Comment": "v2.2.5",
"Rev": "e0c7768f94cdc268b2fce31ada1dea823f11f505" "Rev": "bc9ddf260115d2680191c46977ae72b837785472"
}, },
{ {
"ImportPath": "github.com/coreos/etcd/pkg/httputil", "ImportPath": "github.com/coreos/etcd/pkg/httputil",
"Comment": "v2.2.2-4-ge0c7768", "Comment": "v2.2.5",
"Rev": "e0c7768f94cdc268b2fce31ada1dea823f11f505" "Rev": "bc9ddf260115d2680191c46977ae72b837785472"
}, },
{ {
"ImportPath": "github.com/coreos/etcd/pkg/idutil", "ImportPath": "github.com/coreos/etcd/pkg/idutil",
"Comment": "v2.2.2-4-ge0c7768", "Comment": "v2.2.5",
"Rev": "e0c7768f94cdc268b2fce31ada1dea823f11f505" "Rev": "bc9ddf260115d2680191c46977ae72b837785472"
}, },
{ {
"ImportPath": "github.com/coreos/etcd/pkg/ioutil", "ImportPath": "github.com/coreos/etcd/pkg/ioutil",
"Comment": "v2.2.2-4-ge0c7768", "Comment": "v2.2.5",
"Rev": "e0c7768f94cdc268b2fce31ada1dea823f11f505" "Rev": "bc9ddf260115d2680191c46977ae72b837785472"
}, },
{ {
"ImportPath": "github.com/coreos/etcd/pkg/netutil", "ImportPath": "github.com/coreos/etcd/pkg/netutil",
"Comment": "v2.2.2-4-ge0c7768", "Comment": "v2.2.5",
"Rev": "e0c7768f94cdc268b2fce31ada1dea823f11f505" "Rev": "bc9ddf260115d2680191c46977ae72b837785472"
}, },
{ {
"ImportPath": "github.com/coreos/etcd/pkg/pathutil", "ImportPath": "github.com/coreos/etcd/pkg/pathutil",
"Comment": "v2.2.2-4-ge0c7768", "Comment": "v2.2.5",
"Rev": "e0c7768f94cdc268b2fce31ada1dea823f11f505" "Rev": "bc9ddf260115d2680191c46977ae72b837785472"
}, },
{ {
"ImportPath": "github.com/coreos/etcd/pkg/pbutil", "ImportPath": "github.com/coreos/etcd/pkg/pbutil",
"Comment": "v2.2.2-4-ge0c7768", "Comment": "v2.2.5",
"Rev": "e0c7768f94cdc268b2fce31ada1dea823f11f505" "Rev": "bc9ddf260115d2680191c46977ae72b837785472"
}, },
{ {
"ImportPath": "github.com/coreos/etcd/pkg/runtime", "ImportPath": "github.com/coreos/etcd/pkg/runtime",
"Comment": "v2.2.2-4-ge0c7768", "Comment": "v2.2.5",
"Rev": "e0c7768f94cdc268b2fce31ada1dea823f11f505" "Rev": "bc9ddf260115d2680191c46977ae72b837785472"
}, },
{ {
"ImportPath": "github.com/coreos/etcd/pkg/timeutil", "ImportPath": "github.com/coreos/etcd/pkg/timeutil",
"Comment": "v2.2.2-4-ge0c7768", "Comment": "v2.2.5",
"Rev": "e0c7768f94cdc268b2fce31ada1dea823f11f505" "Rev": "bc9ddf260115d2680191c46977ae72b837785472"
}, },
{ {
"ImportPath": "github.com/coreos/etcd/pkg/transport", "ImportPath": "github.com/coreos/etcd/pkg/transport",
"Comment": "v2.2.2-4-ge0c7768", "Comment": "v2.2.5",
"Rev": "e0c7768f94cdc268b2fce31ada1dea823f11f505" "Rev": "bc9ddf260115d2680191c46977ae72b837785472"
}, },
{ {
"ImportPath": "github.com/coreos/etcd/pkg/types", "ImportPath": "github.com/coreos/etcd/pkg/types",
"Comment": "v2.2.2-4-ge0c7768", "Comment": "v2.2.5",
"Rev": "e0c7768f94cdc268b2fce31ada1dea823f11f505" "Rev": "bc9ddf260115d2680191c46977ae72b837785472"
}, },
{ {
"ImportPath": "github.com/coreos/etcd/pkg/wait", "ImportPath": "github.com/coreos/etcd/pkg/wait",
"Comment": "v2.2.2-4-ge0c7768", "Comment": "v2.2.5",
"Rev": "e0c7768f94cdc268b2fce31ada1dea823f11f505" "Rev": "bc9ddf260115d2680191c46977ae72b837785472"
}, },
{ {
"ImportPath": "github.com/coreos/etcd/raft", "ImportPath": "github.com/coreos/etcd/raft",
"Comment": "v2.2.2-4-ge0c7768", "Comment": "v2.2.5",
"Rev": "e0c7768f94cdc268b2fce31ada1dea823f11f505" "Rev": "bc9ddf260115d2680191c46977ae72b837785472"
}, },
{ {
"ImportPath": "github.com/coreos/etcd/rafthttp", "ImportPath": "github.com/coreos/etcd/rafthttp",
"Comment": "v2.2.2-4-ge0c7768", "Comment": "v2.2.5",
"Rev": "e0c7768f94cdc268b2fce31ada1dea823f11f505" "Rev": "bc9ddf260115d2680191c46977ae72b837785472"
}, },
{ {
"ImportPath": "github.com/coreos/etcd/snap", "ImportPath": "github.com/coreos/etcd/snap",
"Comment": "v2.2.2-4-ge0c7768", "Comment": "v2.2.5",
"Rev": "e0c7768f94cdc268b2fce31ada1dea823f11f505" "Rev": "bc9ddf260115d2680191c46977ae72b837785472"
}, },
{ {
"ImportPath": "github.com/coreos/etcd/storage", "ImportPath": "github.com/coreos/etcd/storage",
"Comment": "v2.2.2-4-ge0c7768", "Comment": "v2.2.5",
"Rev": "e0c7768f94cdc268b2fce31ada1dea823f11f505" "Rev": "bc9ddf260115d2680191c46977ae72b837785472"
}, },
{ {
"ImportPath": "github.com/coreos/etcd/store", "ImportPath": "github.com/coreos/etcd/store",
"Comment": "v2.2.2-4-ge0c7768", "Comment": "v2.2.5",
"Rev": "e0c7768f94cdc268b2fce31ada1dea823f11f505" "Rev": "bc9ddf260115d2680191c46977ae72b837785472"
}, },
{ {
"ImportPath": "github.com/coreos/etcd/version", "ImportPath": "github.com/coreos/etcd/version",
"Comment": "v2.2.2-4-ge0c7768", "Comment": "v2.2.5",
"Rev": "e0c7768f94cdc268b2fce31ada1dea823f11f505" "Rev": "bc9ddf260115d2680191c46977ae72b837785472"
}, },
{ {
"ImportPath": "github.com/coreos/etcd/wal", "ImportPath": "github.com/coreos/etcd/wal",
"Comment": "v2.2.2-4-ge0c7768", "Comment": "v2.2.5",
"Rev": "e0c7768f94cdc268b2fce31ada1dea823f11f505" "Rev": "bc9ddf260115d2680191c46977ae72b837785472"
}, },
{ {
"ImportPath": "github.com/coreos/go-etcd/etcd", "ImportPath": "github.com/coreos/go-etcd/etcd",
@@ -340,8 +336,8 @@
}, },
{ {
"ImportPath": "github.com/coreos/rkt/api/v1alpha", "ImportPath": "github.com/coreos/rkt/api/v1alpha",
"Comment": "v0.15.0-22-g8ac03ac", "Comment": "v1.0.0",
"Rev": "8ac03ace42034b4d6b31af9e3ef574b9e71ccc1a" "Rev": "1ddc36601c8688ff207210bc9ecbf973d09573fa"
}, },
{ {
"ImportPath": "github.com/cpuguy83/go-md2man/md2man", "ImportPath": "github.com/cpuguy83/go-md2man/md2man",
@@ -403,7 +399,7 @@
}, },
{ {
"ImportPath": "github.com/docker/spdystream", "ImportPath": "github.com/docker/spdystream",
"Rev": "c33989bcb56748d2473194d11f8ac3fc563688eb" "Rev": "106e140db2cb50923efe088bf2906b2ee5a45fec"
}, },
{ {
"ImportPath": "github.com/elazarl/go-bindata-assetfs", "ImportPath": "github.com/elazarl/go-bindata-assetfs",
@@ -425,8 +421,7 @@
}, },
{ {
"ImportPath": "github.com/fsouza/go-dockerclient", "ImportPath": "github.com/fsouza/go-dockerclient",
"Comment": "0.2.1-728-g1399676", "Rev": "0099401a7342ad77e71ca9f9a57c5e72fb80f6b2"
"Rev": "299d728486342c894e7fafd68e3a4b89623bef1d"
}, },
{ {
"ImportPath": "github.com/garyburd/redigo/internal", "ImportPath": "github.com/garyburd/redigo/internal",
@@ -452,118 +447,118 @@
}, },
{ {
"ImportPath": "github.com/gogo/protobuf/gogoproto", "ImportPath": "github.com/gogo/protobuf/gogoproto",
"Comment": "v0.1-108-g9dc5109", "Comment": "v0.1-125-g82d16f7",
"Rev": "9dc510915846dd5a05607d3b5bf41f5ca5cce972" "Rev": "82d16f734d6d871204a3feb1a73cb220cc92574c"
}, },
{ {
"ImportPath": "github.com/gogo/protobuf/plugin/defaultcheck", "ImportPath": "github.com/gogo/protobuf/plugin/defaultcheck",
"Comment": "v0.1-108-g9dc5109", "Comment": "v0.1-125-g82d16f7",
"Rev": "9dc510915846dd5a05607d3b5bf41f5ca5cce972" "Rev": "82d16f734d6d871204a3feb1a73cb220cc92574c"
}, },
{ {
"ImportPath": "github.com/gogo/protobuf/plugin/description", "ImportPath": "github.com/gogo/protobuf/plugin/description",
"Comment": "v0.1-108-g9dc5109", "Comment": "v0.1-125-g82d16f7",
"Rev": "9dc510915846dd5a05607d3b5bf41f5ca5cce972" "Rev": "82d16f734d6d871204a3feb1a73cb220cc92574c"
}, },
{ {
"ImportPath": "github.com/gogo/protobuf/plugin/embedcheck", "ImportPath": "github.com/gogo/protobuf/plugin/embedcheck",
"Comment": "v0.1-108-g9dc5109", "Comment": "v0.1-125-g82d16f7",
"Rev": "9dc510915846dd5a05607d3b5bf41f5ca5cce972" "Rev": "82d16f734d6d871204a3feb1a73cb220cc92574c"
}, },
{ {
"ImportPath": "github.com/gogo/protobuf/plugin/enumstringer", "ImportPath": "github.com/gogo/protobuf/plugin/enumstringer",
"Comment": "v0.1-108-g9dc5109", "Comment": "v0.1-125-g82d16f7",
"Rev": "9dc510915846dd5a05607d3b5bf41f5ca5cce972" "Rev": "82d16f734d6d871204a3feb1a73cb220cc92574c"
}, },
{ {
"ImportPath": "github.com/gogo/protobuf/plugin/equal", "ImportPath": "github.com/gogo/protobuf/plugin/equal",
"Comment": "v0.1-108-g9dc5109", "Comment": "v0.1-125-g82d16f7",
"Rev": "9dc510915846dd5a05607d3b5bf41f5ca5cce972" "Rev": "82d16f734d6d871204a3feb1a73cb220cc92574c"
}, },
{ {
"ImportPath": "github.com/gogo/protobuf/plugin/face", "ImportPath": "github.com/gogo/protobuf/plugin/face",
"Comment": "v0.1-108-g9dc5109", "Comment": "v0.1-125-g82d16f7",
"Rev": "9dc510915846dd5a05607d3b5bf41f5ca5cce972" "Rev": "82d16f734d6d871204a3feb1a73cb220cc92574c"
}, },
{ {
"ImportPath": "github.com/gogo/protobuf/plugin/gostring", "ImportPath": "github.com/gogo/protobuf/plugin/gostring",
"Comment": "v0.1-108-g9dc5109", "Comment": "v0.1-125-g82d16f7",
"Rev": "9dc510915846dd5a05607d3b5bf41f5ca5cce972" "Rev": "82d16f734d6d871204a3feb1a73cb220cc92574c"
}, },
{ {
"ImportPath": "github.com/gogo/protobuf/plugin/grpc", "ImportPath": "github.com/gogo/protobuf/plugin/grpc",
"Comment": "v0.1-108-g9dc5109", "Comment": "v0.1-125-g82d16f7",
"Rev": "9dc510915846dd5a05607d3b5bf41f5ca5cce972" "Rev": "82d16f734d6d871204a3feb1a73cb220cc92574c"
}, },
{ {
"ImportPath": "github.com/gogo/protobuf/plugin/marshalto", "ImportPath": "github.com/gogo/protobuf/plugin/marshalto",
"Comment": "v0.1-108-g9dc5109", "Comment": "v0.1-125-g82d16f7",
"Rev": "9dc510915846dd5a05607d3b5bf41f5ca5cce972" "Rev": "82d16f734d6d871204a3feb1a73cb220cc92574c"
}, },
{ {
"ImportPath": "github.com/gogo/protobuf/plugin/oneofcheck", "ImportPath": "github.com/gogo/protobuf/plugin/oneofcheck",
"Comment": "v0.1-108-g9dc5109", "Comment": "v0.1-125-g82d16f7",
"Rev": "9dc510915846dd5a05607d3b5bf41f5ca5cce972" "Rev": "82d16f734d6d871204a3feb1a73cb220cc92574c"
}, },
{ {
"ImportPath": "github.com/gogo/protobuf/plugin/populate", "ImportPath": "github.com/gogo/protobuf/plugin/populate",
"Comment": "v0.1-108-g9dc5109", "Comment": "v0.1-125-g82d16f7",
"Rev": "9dc510915846dd5a05607d3b5bf41f5ca5cce972" "Rev": "82d16f734d6d871204a3feb1a73cb220cc92574c"
}, },
{ {
"ImportPath": "github.com/gogo/protobuf/plugin/size", "ImportPath": "github.com/gogo/protobuf/plugin/size",
"Comment": "v0.1-108-g9dc5109", "Comment": "v0.1-125-g82d16f7",
"Rev": "9dc510915846dd5a05607d3b5bf41f5ca5cce972" "Rev": "82d16f734d6d871204a3feb1a73cb220cc92574c"
}, },
{ {
"ImportPath": "github.com/gogo/protobuf/plugin/stringer", "ImportPath": "github.com/gogo/protobuf/plugin/stringer",
"Comment": "v0.1-108-g9dc5109", "Comment": "v0.1-125-g82d16f7",
"Rev": "9dc510915846dd5a05607d3b5bf41f5ca5cce972" "Rev": "82d16f734d6d871204a3feb1a73cb220cc92574c"
}, },
{ {
"ImportPath": "github.com/gogo/protobuf/plugin/testgen", "ImportPath": "github.com/gogo/protobuf/plugin/testgen",
"Comment": "v0.1-108-g9dc5109", "Comment": "v0.1-125-g82d16f7",
"Rev": "9dc510915846dd5a05607d3b5bf41f5ca5cce972" "Rev": "82d16f734d6d871204a3feb1a73cb220cc92574c"
}, },
{ {
"ImportPath": "github.com/gogo/protobuf/plugin/union", "ImportPath": "github.com/gogo/protobuf/plugin/union",
"Comment": "v0.1-108-g9dc5109", "Comment": "v0.1-125-g82d16f7",
"Rev": "9dc510915846dd5a05607d3b5bf41f5ca5cce972" "Rev": "82d16f734d6d871204a3feb1a73cb220cc92574c"
}, },
{ {
"ImportPath": "github.com/gogo/protobuf/plugin/unmarshal", "ImportPath": "github.com/gogo/protobuf/plugin/unmarshal",
"Comment": "v0.1-108-g9dc5109", "Comment": "v0.1-125-g82d16f7",
"Rev": "9dc510915846dd5a05607d3b5bf41f5ca5cce972" "Rev": "82d16f734d6d871204a3feb1a73cb220cc92574c"
}, },
{ {
"ImportPath": "github.com/gogo/protobuf/proto", "ImportPath": "github.com/gogo/protobuf/proto",
"Comment": "v0.1-108-g9dc5109", "Comment": "v0.1-125-g82d16f7",
"Rev": "9dc510915846dd5a05607d3b5bf41f5ca5cce972" "Rev": "82d16f734d6d871204a3feb1a73cb220cc92574c"
}, },
{ {
"ImportPath": "github.com/gogo/protobuf/protoc-gen-gogo/descriptor", "ImportPath": "github.com/gogo/protobuf/protoc-gen-gogo/descriptor",
"Comment": "v0.1-108-g9dc5109", "Comment": "v0.1-125-g82d16f7",
"Rev": "9dc510915846dd5a05607d3b5bf41f5ca5cce972" "Rev": "82d16f734d6d871204a3feb1a73cb220cc92574c"
}, },
{ {
"ImportPath": "github.com/gogo/protobuf/protoc-gen-gogo/generator", "ImportPath": "github.com/gogo/protobuf/protoc-gen-gogo/generator",
"Comment": "v0.1-108-g9dc5109", "Comment": "v0.1-125-g82d16f7",
"Rev": "9dc510915846dd5a05607d3b5bf41f5ca5cce972" "Rev": "82d16f734d6d871204a3feb1a73cb220cc92574c"
}, },
{ {
"ImportPath": "github.com/gogo/protobuf/protoc-gen-gogo/plugin", "ImportPath": "github.com/gogo/protobuf/protoc-gen-gogo/plugin",
"Comment": "v0.1-108-g9dc5109", "Comment": "v0.1-125-g82d16f7",
"Rev": "9dc510915846dd5a05607d3b5bf41f5ca5cce972" "Rev": "82d16f734d6d871204a3feb1a73cb220cc92574c"
}, },
{ {
"ImportPath": "github.com/gogo/protobuf/sortkeys", "ImportPath": "github.com/gogo/protobuf/sortkeys",
"Comment": "v0.1-108-g9dc5109", "Comment": "v0.1-125-g82d16f7",
"Rev": "9dc510915846dd5a05607d3b5bf41f5ca5cce972" "Rev": "82d16f734d6d871204a3feb1a73cb220cc92574c"
}, },
{ {
"ImportPath": "github.com/gogo/protobuf/vanity", "ImportPath": "github.com/gogo/protobuf/vanity",
"Comment": "v0.1-108-g9dc5109", "Comment": "v0.1-125-g82d16f7",
"Rev": "9dc510915846dd5a05607d3b5bf41f5ca5cce972" "Rev": "82d16f734d6d871204a3feb1a73cb220cc92574c"
}, },
{ {
"ImportPath": "github.com/golang/glog", "ImportPath": "github.com/golang/glog",
@@ -575,7 +570,7 @@
}, },
{ {
"ImportPath": "github.com/golang/protobuf/proto", "ImportPath": "github.com/golang/protobuf/proto",
"Rev": "7f07925444bb51fa4cf9dfe6f7661876f8852275" "Rev": "b982704f8bb716bb608144408cff30e15fbde841"
}, },
{ {
"ImportPath": "github.com/google/btree", "ImportPath": "github.com/google/btree",
@@ -583,93 +578,93 @@
}, },
{ {
"ImportPath": "github.com/google/cadvisor/api", "ImportPath": "github.com/google/cadvisor/api",
"Comment": "v0.20.5", "Comment": "v0.21.1",
"Rev": "9aa348ff5e191fcf3eccd59e5a434022aca77b87" "Rev": "49a08d5139ae0111757a110ea3b55fe9a607873d"
}, },
{ {
"ImportPath": "github.com/google/cadvisor/cache/memory", "ImportPath": "github.com/google/cadvisor/cache/memory",
"Comment": "v0.20.5", "Comment": "v0.21.1",
"Rev": "9aa348ff5e191fcf3eccd59e5a434022aca77b87" "Rev": "49a08d5139ae0111757a110ea3b55fe9a607873d"
}, },
{ {
"ImportPath": "github.com/google/cadvisor/collector", "ImportPath": "github.com/google/cadvisor/collector",
"Comment": "v0.20.5", "Comment": "v0.21.1",
"Rev": "9aa348ff5e191fcf3eccd59e5a434022aca77b87" "Rev": "49a08d5139ae0111757a110ea3b55fe9a607873d"
}, },
{ {
"ImportPath": "github.com/google/cadvisor/container", "ImportPath": "github.com/google/cadvisor/container",
"Comment": "v0.20.5", "Comment": "v0.21.1",
"Rev": "9aa348ff5e191fcf3eccd59e5a434022aca77b87" "Rev": "49a08d5139ae0111757a110ea3b55fe9a607873d"
}, },
{ {
"ImportPath": "github.com/google/cadvisor/events", "ImportPath": "github.com/google/cadvisor/events",
"Comment": "v0.20.5", "Comment": "v0.21.1",
"Rev": "9aa348ff5e191fcf3eccd59e5a434022aca77b87" "Rev": "49a08d5139ae0111757a110ea3b55fe9a607873d"
}, },
{ {
"ImportPath": "github.com/google/cadvisor/fs", "ImportPath": "github.com/google/cadvisor/fs",
"Comment": "v0.20.5", "Comment": "v0.21.1",
"Rev": "9aa348ff5e191fcf3eccd59e5a434022aca77b87" "Rev": "49a08d5139ae0111757a110ea3b55fe9a607873d"
}, },
{ {
"ImportPath": "github.com/google/cadvisor/healthz", "ImportPath": "github.com/google/cadvisor/healthz",
"Comment": "v0.20.5", "Comment": "v0.21.1",
"Rev": "9aa348ff5e191fcf3eccd59e5a434022aca77b87" "Rev": "49a08d5139ae0111757a110ea3b55fe9a607873d"
}, },
{ {
"ImportPath": "github.com/google/cadvisor/http", "ImportPath": "github.com/google/cadvisor/http",
"Comment": "v0.20.5", "Comment": "v0.21.1",
"Rev": "9aa348ff5e191fcf3eccd59e5a434022aca77b87" "Rev": "49a08d5139ae0111757a110ea3b55fe9a607873d"
}, },
{ {
"ImportPath": "github.com/google/cadvisor/info/v1", "ImportPath": "github.com/google/cadvisor/info/v1",
"Comment": "v0.20.5", "Comment": "v0.21.1",
"Rev": "9aa348ff5e191fcf3eccd59e5a434022aca77b87" "Rev": "49a08d5139ae0111757a110ea3b55fe9a607873d"
}, },
{ {
"ImportPath": "github.com/google/cadvisor/info/v2", "ImportPath": "github.com/google/cadvisor/info/v2",
"Comment": "v0.20.5", "Comment": "v0.21.1",
"Rev": "9aa348ff5e191fcf3eccd59e5a434022aca77b87" "Rev": "49a08d5139ae0111757a110ea3b55fe9a607873d"
}, },
{ {
"ImportPath": "github.com/google/cadvisor/manager", "ImportPath": "github.com/google/cadvisor/manager",
"Comment": "v0.20.5", "Comment": "v0.21.1",
"Rev": "9aa348ff5e191fcf3eccd59e5a434022aca77b87" "Rev": "49a08d5139ae0111757a110ea3b55fe9a607873d"
}, },
{ {
"ImportPath": "github.com/google/cadvisor/metrics", "ImportPath": "github.com/google/cadvisor/metrics",
"Comment": "v0.20.5", "Comment": "v0.21.1",
"Rev": "9aa348ff5e191fcf3eccd59e5a434022aca77b87" "Rev": "49a08d5139ae0111757a110ea3b55fe9a607873d"
}, },
{ {
"ImportPath": "github.com/google/cadvisor/pages", "ImportPath": "github.com/google/cadvisor/pages",
"Comment": "v0.20.5", "Comment": "v0.21.1",
"Rev": "9aa348ff5e191fcf3eccd59e5a434022aca77b87" "Rev": "49a08d5139ae0111757a110ea3b55fe9a607873d"
}, },
{ {
"ImportPath": "github.com/google/cadvisor/storage", "ImportPath": "github.com/google/cadvisor/storage",
"Comment": "v0.20.5", "Comment": "v0.21.1",
"Rev": "9aa348ff5e191fcf3eccd59e5a434022aca77b87" "Rev": "49a08d5139ae0111757a110ea3b55fe9a607873d"
}, },
{ {
"ImportPath": "github.com/google/cadvisor/summary", "ImportPath": "github.com/google/cadvisor/summary",
"Comment": "v0.20.5", "Comment": "v0.21.1",
"Rev": "9aa348ff5e191fcf3eccd59e5a434022aca77b87" "Rev": "49a08d5139ae0111757a110ea3b55fe9a607873d"
}, },
{ {
"ImportPath": "github.com/google/cadvisor/utils", "ImportPath": "github.com/google/cadvisor/utils",
"Comment": "v0.20.5", "Comment": "v0.21.1",
"Rev": "9aa348ff5e191fcf3eccd59e5a434022aca77b87" "Rev": "49a08d5139ae0111757a110ea3b55fe9a607873d"
}, },
{ {
"ImportPath": "github.com/google/cadvisor/validate", "ImportPath": "github.com/google/cadvisor/validate",
"Comment": "v0.20.5", "Comment": "v0.21.1",
"Rev": "9aa348ff5e191fcf3eccd59e5a434022aca77b87" "Rev": "49a08d5139ae0111757a110ea3b55fe9a607873d"
}, },
{ {
"ImportPath": "github.com/google/cadvisor/version", "ImportPath": "github.com/google/cadvisor/version",
"Comment": "v0.20.5", "Comment": "v0.21.1",
"Rev": "9aa348ff5e191fcf3eccd59e5a434022aca77b87" "Rev": "49a08d5139ae0111757a110ea3b55fe9a607873d"
}, },
{ {
"ImportPath": "github.com/google/gofuzz", "ImportPath": "github.com/google/gofuzz",
@@ -824,8 +819,8 @@
}, },
{ {
"ImportPath": "github.com/onsi/ginkgo", "ImportPath": "github.com/onsi/ginkgo",
"Comment": "v1.2.0-6-gd981d36", "Comment": "v1.2.0-42-g07d85e6",
"Rev": "d981d36e9884231afa909627b9c275e4ba678f90" "Rev": "07d85e6b10c4289c7d612f9b13f45ba36f66d55b"
}, },
{ {
"ImportPath": "github.com/onsi/gomega", "ImportPath": "github.com/onsi/gomega",
@@ -931,7 +926,7 @@
}, },
{ {
"ImportPath": "github.com/ugorji/go/codec", "ImportPath": "github.com/ugorji/go/codec",
"Rev": "f1f1a805ed361a0e078bb537e4ea78cd37dcf065" "Rev": "4a79e5b7b21e51ae8d61641bca20399b79735a32"
}, },
{ {
"ImportPath": "github.com/vishvananda/netlink", "ImportPath": "github.com/vishvananda/netlink",
@@ -970,6 +965,10 @@
"ImportPath": "golang.org/x/net/html", "ImportPath": "golang.org/x/net/html",
"Rev": "c2528b2dd8352441850638a8bb678c2ad056fd3e" "Rev": "c2528b2dd8352441850638a8bb678c2ad056fd3e"
}, },
{
"ImportPath": "golang.org/x/net/http2",
"Rev": "c2528b2dd8352441850638a8bb678c2ad056fd3e"
},
{ {
"ImportPath": "golang.org/x/net/internal/timeseries", "ImportPath": "golang.org/x/net/internal/timeseries",
"Rev": "c2528b2dd8352441850638a8bb678c2ad056fd3e" "Rev": "c2528b2dd8352441850638a8bb678c2ad056fd3e"
@@ -1028,7 +1027,7 @@
}, },
{ {
"ImportPath": "google.golang.org/grpc", "ImportPath": "google.golang.org/grpc",
"Rev": "4bd040ce23a624ff9a1d07b0e729ee189bddd51c" "Rev": "933601d8cd6418a8a891bd9075a7161b0a67badb"
}, },
{ {
"ImportPath": "gopkg.in/natefinch/lumberjack.v2", "ImportPath": "gopkg.in/natefinch/lumberjack.v2",
@@ -1041,7 +1040,7 @@
}, },
{ {
"ImportPath": "k8s.io/heapster/api/v1/types", "ImportPath": "k8s.io/heapster/api/v1/types",
"Comment": "v0.19.1", "Comment": "v0.19.1-44-g0991ac5",
"Rev": "0991ac528ea24aae194e45d6dcf01896cb42cbea" "Rev": "0991ac528ea24aae194e45d6dcf01896cb42cbea"
}, },
{ {

3
Godeps/LICENSES.md generated
View File

@@ -13,7 +13,6 @@ github.com/aws/aws-sdk-go | Apache-2
github.com/beorn7/perks/quantile | MIT? github.com/beorn7/perks/quantile | MIT?
github.com/blang/semver | MITname github.com/blang/semver | MITname
github.com/boltdb/bolt | MITname github.com/boltdb/bolt | MITname
github.com/bradfitz/http2 | BSDlikeRef
github.com/camlistore/go4 | Apache-2 github.com/camlistore/go4 | Apache-2
github.com/ClusterHQ/flocker-go | UNKNOWN github.com/ClusterHQ/flocker-go | UNKNOWN
github.com/codegangsta/negroni | MITname github.com/codegangsta/negroni | MITname
@@ -23,7 +22,7 @@ github.com/coreos/go-oidc | Apache-2
github.com/coreos/go-semver | Apache-2 github.com/coreos/go-semver | Apache-2
github.com/coreos/go-systemd | Apache-2 github.com/coreos/go-systemd | Apache-2
github.com/coreos/pkg | Apache-2 github.com/coreos/pkg | Apache-2
github.com/coreos/rkt | Apache-2 github.com/coreos/rkt | MITname
github.com/cpuguy83/go-md2man | MITname github.com/cpuguy83/go-md2man | MITname
github.com/davecgh/go-spew | MIToldwithoutSellandNoDocumentationRequi github.com/davecgh/go-spew | MIToldwithoutSellandNoDocumentationRequi
github.com/daviddengcn/go-colortext | BSD? github.com/daviddengcn/go-colortext | BSD?

View File

@@ -1,54 +1,18 @@
TEST=.
BENCH=.
COVERPROFILE=/tmp/c.out
BRANCH=`git rev-parse --abbrev-ref HEAD` BRANCH=`git rev-parse --abbrev-ref HEAD`
COMMIT=`git rev-parse --short HEAD` COMMIT=`git rev-parse --short HEAD`
GOLDFLAGS="-X main.branch $(BRANCH) -X main.commit $(COMMIT)" GOLDFLAGS="-X main.branch $(BRANCH) -X main.commit $(COMMIT)"
default: build default: build
bench: race:
go test -v -test.run=NOTHINCONTAINSTHIS -test.bench=$(BENCH) @go test -v -race -test.run="TestSimulate_(100op|1000op)"
# http://cloc.sourceforge.net/
cloc:
@cloc --not-match-f='Makefile|_test.go' .
cover: fmt
go test -coverprofile=$(COVERPROFILE) -test.run=$(TEST) $(COVERFLAG) .
go tool cover -html=$(COVERPROFILE)
rm $(COVERPROFILE)
cpuprofile: fmt
@go test -c
@./bolt.test -test.v -test.run=$(TEST) -test.cpuprofile cpu.prof
# go get github.com/kisielk/errcheck # go get github.com/kisielk/errcheck
errcheck: errcheck:
@echo "=== errcheck ===" @errcheck -ignorepkg=bytes -ignore=os:Remove github.com/boltdb/bolt
@errcheck github.com/boltdb/bolt
fmt: test:
@go fmt ./... @go test -v -cover .
@go test -v ./cmd/bolt
get: .PHONY: fmt test
@go get -d ./...
build: get
@mkdir -p bin
@go build -ldflags=$(GOLDFLAGS) -a -o bin/bolt ./cmd/bolt
test: fmt
@go get github.com/stretchr/testify/assert
@echo "=== TESTS ==="
@go test -v -cover -test.run=$(TEST)
@echo ""
@echo ""
@echo "=== CLI ==="
@go test -v -test.run=$(TEST) ./cmd/bolt
@echo ""
@echo ""
@echo "=== RACE DETECTOR ==="
@go test -v -race -test.run="TestSimulate_(100op|1000op)"
.PHONY: bench cloc cover cpuprofile fmt memprofile test

View File

@@ -1,8 +1,8 @@
Bolt [![Build Status](https://drone.io/github.com/boltdb/bolt/status.png)](https://drone.io/github.com/boltdb/bolt/latest) [![Coverage Status](https://coveralls.io/repos/boltdb/bolt/badge.png?branch=master)](https://coveralls.io/r/boltdb/bolt?branch=master) [![GoDoc](https://godoc.org/github.com/boltdb/bolt?status.png)](https://godoc.org/github.com/boltdb/bolt) ![Version](http://img.shields.io/badge/version-1.0-green.png) Bolt [![Build Status](https://drone.io/github.com/boltdb/bolt/status.png)](https://drone.io/github.com/boltdb/bolt/latest) [![Coverage Status](https://coveralls.io/repos/boltdb/bolt/badge.svg?branch=master)](https://coveralls.io/r/boltdb/bolt?branch=master) [![GoDoc](https://godoc.org/github.com/boltdb/bolt?status.svg)](https://godoc.org/github.com/boltdb/bolt) ![Version](https://img.shields.io/badge/version-1.0-green.svg)
==== ====
Bolt is a pure Go key/value store inspired by [Howard Chu's][hyc_symas] and Bolt is a pure Go key/value store inspired by [Howard Chu's][hyc_symas]
the [LMDB project][lmdb]. The goal of the project is to provide a simple, [LMDB project][lmdb]. The goal of the project is to provide a simple,
fast, and reliable database for projects that don't require a full database fast, and reliable database for projects that don't require a full database
server such as Postgres or MySQL. server such as Postgres or MySQL.
@@ -13,7 +13,6 @@ and setting values. That's it.
[hyc_symas]: https://twitter.com/hyc_symas [hyc_symas]: https://twitter.com/hyc_symas
[lmdb]: http://symas.com/mdb/ [lmdb]: http://symas.com/mdb/
## Project Status ## Project Status
Bolt is stable and the API is fixed. Full unit test coverage and randomized Bolt is stable and the API is fixed. Full unit test coverage and randomized
@@ -22,6 +21,36 @@ Bolt is currently in high-load production environments serving databases as
large as 1TB. Many companies such as Shopify and Heroku use Bolt-backed large as 1TB. Many companies such as Shopify and Heroku use Bolt-backed
services every day. services every day.
## Table of Contents
- [Getting Started](#getting-started)
- [Installing](#installing)
- [Opening a database](#opening-a-database)
- [Transactions](#transactions)
- [Read-write transactions](#read-write-transactions)
- [Read-only transactions](#read-only-transactions)
- [Batch read-write transactions](#batch-read-write-transactions)
- [Managing transactions manually](#managing-transactions-manually)
- [Using buckets](#using-buckets)
- [Using key/value pairs](#using-keyvalue-pairs)
- [Autoincrementing integer for the bucket](#autoincrementing-integer-for-the-bucket)
- [Iterating over keys](#iterating-over-keys)
- [Prefix scans](#prefix-scans)
- [Range scans](#range-scans)
- [ForEach()](#foreach)
- [Nested buckets](#nested-buckets)
- [Database backups](#database-backups)
- [Statistics](#statistics)
- [Read-Only Mode](#read-only-mode)
- [Mobile Use (iOS/Android)](#mobile-use-iosandroid)
- [Resources](#resources)
- [Comparison with other databases](#comparison-with-other-databases)
- [Postgres, MySQL, & other relational databases](#postgres-mysql--other-relational-databases)
- [LevelDB, RocksDB](#leveldb-rocksdb)
- [LMDB](#lmdb)
- [Caveats & Limitations](#caveats--limitations)
- [Reading the Source](#reading-the-source)
- [Other Projects Using Bolt](#other-projects-using-bolt)
## Getting Started ## Getting Started
@@ -180,8 +209,8 @@ and then safely close your transaction if an error is returned. This is the
recommended way to use Bolt transactions. recommended way to use Bolt transactions.
However, sometimes you may want to manually start and end your transactions. However, sometimes you may want to manually start and end your transactions.
You can use the `Tx.Begin()` function directly but _please_ be sure to close the You can use the `Tx.Begin()` function directly but **please** be sure to close
transaction. the transaction.
```go ```go
// Start a writable transaction. // Start a writable transaction.
@@ -256,7 +285,7 @@ db.View(func(tx *bolt.Tx) error {
``` ```
The `Get()` function does not return an error because its operation is The `Get()` function does not return an error because its operation is
guarenteed to work (unless there is some kind of system failure). If the key guaranteed to work (unless there is some kind of system failure). If the key
exists then it will return its byte slice value. If it doesn't exist then it exists then it will return its byte slice value. If it doesn't exist then it
will return `nil`. It's important to note that you can have a zero-length value will return `nil`. It's important to note that you can have a zero-length value
set to a key which is different than the key not existing. set to a key which is different than the key not existing.
@@ -268,6 +297,49 @@ transaction is open. If you need to use a value outside of the transaction
then you must use `copy()` to copy it to another byte slice. then you must use `copy()` to copy it to another byte slice.
### Autoincrementing integer for the bucket
By using the `NextSequence()` function, you can let Bolt determine a sequence
which can be used as the unique identifier for your key/value pairs. See the
example below.
```go
// CreateUser saves u to the store. The new user ID is set on u once the data is persisted.
func (s *Store) CreateUser(u *User) error {
return s.db.Update(func(tx *bolt.Tx) error {
// Retrieve the users bucket.
// This should be created when the DB is first opened.
b := tx.Bucket([]byte("users"))
// Generate ID for the user.
// This returns an error only if the Tx is closed or not writeable.
// That can't happen in an Update() call so I ignore the error check.
id, _ = b.NextSequence()
u.ID = int(id)
// Marshal user data into bytes.
buf, err := json.Marshal(u)
if err != nil {
return err
}
// Persist bytes to users bucket.
return b.Put(itob(u.ID), buf)
})
}
// itob returns an 8-byte big endian representation of v.
func itob(v int) []byte {
b := make([]byte, 8)
binary.BigEndian.PutUint64(b, uint64(v))
return b
}
type User struct {
ID int
...
}
```
### Iterating over keys ### Iterating over keys
Bolt stores its keys in byte-sorted order within a bucket. This makes sequential Bolt stores its keys in byte-sorted order within a bucket. This makes sequential
@@ -276,7 +348,9 @@ iteration over these keys extremely fast. To iterate over keys we'll use a
```go ```go
db.View(func(tx *bolt.Tx) error { db.View(func(tx *bolt.Tx) error {
// Assume bucket exists and has keys
b := tx.Bucket([]byte("MyBucket")) b := tx.Bucket([]byte("MyBucket"))
c := b.Cursor() c := b.Cursor()
for k, v := c.First(); k != nil; k, v = c.Next() { for k, v := c.First(); k != nil; k, v = c.Next() {
@@ -300,10 +374,15 @@ Next() Move to the next key.
Prev() Move to the previous key. Prev() Move to the previous key.
``` ```
When you have iterated to the end of the cursor then `Next()` will return `nil`. Each of those functions has a return signature of `(key []byte, value []byte)`.
You must seek to a position using `First()`, `Last()`, or `Seek()` before When you have iterated to the end of the cursor then `Next()` will return a
calling `Next()` or `Prev()`. If you do not seek to a position then these `nil` key. You must seek to a position using `First()`, `Last()`, or `Seek()`
functions will return `nil`. before calling `Next()` or `Prev()`. If you do not seek to a position then
these functions will return a `nil` key.
During iteration, if the key is non-`nil` but the value is `nil`, that means
the key refers to a bucket rather than a value. Use `Bucket.Bucket()` to
access the sub-bucket.
#### Prefix scans #### Prefix scans
@@ -312,6 +391,7 @@ To iterate over a key prefix, you can combine `Seek()` and `bytes.HasPrefix()`:
```go ```go
db.View(func(tx *bolt.Tx) error { db.View(func(tx *bolt.Tx) error {
// Assume bucket exists and has keys
c := tx.Bucket([]byte("MyBucket")).Cursor() c := tx.Bucket([]byte("MyBucket")).Cursor()
prefix := []byte("1234") prefix := []byte("1234")
@@ -331,7 +411,7 @@ date range like this:
```go ```go
db.View(func(tx *bolt.Tx) error { db.View(func(tx *bolt.Tx) error {
// Assume our events bucket has RFC3339 encoded time keys. // Assume our events bucket exists and has RFC3339 encoded time keys.
c := tx.Bucket([]byte("Events")).Cursor() c := tx.Bucket([]byte("Events")).Cursor()
// Our time range spans the 90's decade. // Our time range spans the 90's decade.
@@ -355,7 +435,9 @@ all the keys in a bucket:
```go ```go
db.View(func(tx *bolt.Tx) error { db.View(func(tx *bolt.Tx) error {
// Assume bucket exists and has keys
b := tx.Bucket([]byte("MyBucket")) b := tx.Bucket([]byte("MyBucket"))
b.ForEach(func(k, v []byte) error { b.ForEach(func(k, v []byte) error {
fmt.Printf("key=%s, value=%s\n", k, v) fmt.Printf("key=%s, value=%s\n", k, v)
return nil return nil
@@ -382,8 +464,11 @@ func (*Bucket) DeleteBucket(key []byte) error
Bolt is a single file so it's easy to backup. You can use the `Tx.WriteTo()` Bolt is a single file so it's easy to backup. You can use the `Tx.WriteTo()`
function to write a consistent view of the database to a writer. If you call function to write a consistent view of the database to a writer. If you call
this from a read-only transaction, it will perform a hot backup and not block this from a read-only transaction, it will perform a hot backup and not block
your other database reads and writes. It will also use `O_DIRECT` when available your other database reads and writes.
to prevent page cache trashing.
By default, it will use a regular file handle which will utilize the operating
system's page cache. See the [`Tx`](https://godoc.org/github.com/boltdb/bolt#Tx)
documentation for information about optimizing for larger-than-RAM datasets.
One common use case is to backup over HTTP so you can use tools like `cURL` to One common use case is to backup over HTTP so you can use tools like `cURL` to
do database backups: do database backups:
@@ -465,6 +550,84 @@ if err != nil {
} }
``` ```
### Mobile Use (iOS/Android)
Bolt is able to run on mobile devices by leveraging the binding feature of the
[gomobile](https://github.com/golang/mobile) tool. Create a struct that will
contain your database logic and a reference to a `*bolt.DB` with an initializing
constructor that takes in a filepath where the database file will be stored.
Neither Android nor iOS require extra permissions or cleanup from using this method.
```go
func NewBoltDB(filepath string) *BoltDB {
db, err := bolt.Open(filepath+"/demo.db", 0600, nil)
if err != nil {
log.Fatal(err)
}
return &BoltDB{db}
}
type BoltDB struct {
db *bolt.DB
...
}
func (b *BoltDB) Path() string {
return b.db.Path()
}
func (b *BoltDB) Close() {
b.db.Close()
}
```
Database logic should be defined as methods on this wrapper struct.
To initialize this struct from the native language (both platforms now sync
their local storage to the cloud. These snippets disable that functionality for the
database file):
#### Android
```java
String path;
if (android.os.Build.VERSION.SDK_INT >=android.os.Build.VERSION_CODES.LOLLIPOP){
path = getNoBackupFilesDir().getAbsolutePath();
} else{
path = getFilesDir().getAbsolutePath();
}
Boltmobiledemo.BoltDB boltDB = Boltmobiledemo.NewBoltDB(path)
```
#### iOS
```objc
- (void)demo {
NSString* path = [NSSearchPathForDirectoriesInDomains(NSLibraryDirectory,
NSUserDomainMask,
YES) objectAtIndex:0];
GoBoltmobiledemoBoltDB * demo = GoBoltmobiledemoNewBoltDB(path);
[self addSkipBackupAttributeToItemAtPath:demo.path];
//Some DB Logic would go here
[demo close];
}
- (BOOL)addSkipBackupAttributeToItemAtPath:(NSString *) filePathString
{
NSURL* URL= [NSURL fileURLWithPath: filePathString];
assert([[NSFileManager defaultManager] fileExistsAtPath: [URL path]]);
NSError *error = nil;
BOOL success = [URL setResourceValue: [NSNumber numberWithBool: YES]
forKey: NSURLIsExcludedFromBackupKey error: &error];
if(!success){
NSLog(@"Error excluding %@ from backup %@", [URL lastPathComponent], error);
}
return success;
}
```
## Resources ## Resources
@@ -500,7 +663,7 @@ they are libraries bundled into the application, however, their underlying
structure is a log-structured merge-tree (LSM tree). An LSM tree optimizes structure is a log-structured merge-tree (LSM tree). An LSM tree optimizes
random writes by using a write ahead log and multi-tiered, sorted files called random writes by using a write ahead log and multi-tiered, sorted files called
SSTables. Bolt uses a B+tree internally and only a single file. Both approaches SSTables. Bolt uses a B+tree internally and only a single file. Both approaches
have trade offs. have trade-offs.
If you require a high random write throughput (>10,000 w/sec) or you need to use If you require a high random write throughput (>10,000 w/sec) or you need to use
spinning disks then LevelDB could be a good choice. If your application is spinning disks then LevelDB could be a good choice. If your application is
@@ -536,9 +699,8 @@ It's important to pick the right tool for the job and Bolt is no exception.
Here are a few things to note when evaluating and using Bolt: Here are a few things to note when evaluating and using Bolt:
* Bolt is good for read intensive workloads. Sequential write performance is * Bolt is good for read intensive workloads. Sequential write performance is
also fast but random writes can be slow. You can add a write-ahead log or also fast but random writes can be slow. You can use `DB.Batch()` or add a
[transaction coalescer](https://github.com/boltdb/coalescer) in front of Bolt write-ahead log to help mitigate this issue.
to mitigate this issue.
* Bolt uses a B+tree internally so there can be a lot of random page access. * Bolt uses a B+tree internally so there can be a lot of random page access.
SSDs provide a significant performance boost over spinning disks. SSDs provide a significant performance boost over spinning disks.
@@ -568,11 +730,13 @@ Here are a few things to note when evaluating and using Bolt:
can in memory and will release memory as needed to other processes. This means can in memory and will release memory as needed to other processes. This means
that Bolt can show very high memory usage when working with large databases. that Bolt can show very high memory usage when working with large databases.
However, this is expected and the OS will release memory as needed. Bolt can However, this is expected and the OS will release memory as needed. Bolt can
handle databases much larger than the available physical RAM. handle databases much larger than the available physical RAM, provided its
memory-map fits in the process virtual address space. It may be problematic
on 32-bits systems.
* The data structures in the Bolt database are memory mapped so the data file * The data structures in the Bolt database are memory mapped so the data file
will be endian specific. This means that you cannot copy a Bolt file from a will be endian specific. This means that you cannot copy a Bolt file from a
little endian machine to a big endian machine and have it work. For most little endian machine to a big endian machine and have it work. For most
users this is not a concern since most modern CPUs are little endian. users this is not a concern since most modern CPUs are little endian.
* Because of the way pages are laid out on disk, Bolt cannot truncate data files * Because of the way pages are laid out on disk, Bolt cannot truncate data files
@@ -587,6 +751,56 @@ Here are a few things to note when evaluating and using Bolt:
[page-allocation]: https://github.com/boltdb/bolt/issues/308#issuecomment-74811638 [page-allocation]: https://github.com/boltdb/bolt/issues/308#issuecomment-74811638
## Reading the Source
Bolt is a relatively small code base (<3KLOC) for an embedded, serializable,
transactional key/value database so it can be a good starting point for people
interested in how databases work.
The best places to start are the main entry points into Bolt:
- `Open()` - Initializes the reference to the database. It's responsible for
creating the database if it doesn't exist, obtaining an exclusive lock on the
file, reading the meta pages, & memory-mapping the file.
- `DB.Begin()` - Starts a read-only or read-write transaction depending on the
value of the `writable` argument. This requires briefly obtaining the "meta"
lock to keep track of open transactions. Only one read-write transaction can
exist at a time so the "rwlock" is acquired during the life of a read-write
transaction.
- `Bucket.Put()` - Writes a key/value pair into a bucket. After validating the
arguments, a cursor is used to traverse the B+tree to the page and position
where they key & value will be written. Once the position is found, the bucket
materializes the underlying page and the page's parent pages into memory as
"nodes". These nodes are where mutations occur during read-write transactions.
These changes get flushed to disk during commit.
- `Bucket.Get()` - Retrieves a key/value pair from a bucket. This uses a cursor
to move to the page & position of a key/value pair. During a read-only
transaction, the key and value data is returned as a direct reference to the
underlying mmap file so there's no allocation overhead. For read-write
transactions, this data may reference the mmap file or one of the in-memory
node values.
- `Cursor` - This object is simply for traversing the B+tree of on-disk pages
or in-memory nodes. It can seek to a specific key, move to the first or last
value, or it can move forward or backward. The cursor handles the movement up
and down the B+tree transparently to the end user.
- `Tx.Commit()` - Converts the in-memory dirty nodes and the list of free pages
into pages to be written to disk. Writing to disk then occurs in two phases.
First, the dirty pages are written to disk and an `fsync()` occurs. Second, a
new meta page with an incremented transaction ID is written and another
`fsync()` occurs. This two phase write ensures that partially written data
pages are ignored in the event of a crash since the meta page pointing to them
is never written. Partially written meta pages are invalidated because they
are written with a checksum.
If you have additional notes that could be helpful for others, please submit
them via pull request.
## Other Projects Using Bolt ## Other Projects Using Bolt
Below is a list of public, open source projects that use Bolt: Below is a list of public, open source projects that use Bolt:
@@ -597,25 +811,30 @@ Below is a list of public, open source projects that use Bolt:
* [Skybox Analytics](https://github.com/skybox/skybox) - A standalone funnel analysis tool for web analytics. * [Skybox Analytics](https://github.com/skybox/skybox) - A standalone funnel analysis tool for web analytics.
* [Scuttlebutt](https://github.com/benbjohnson/scuttlebutt) - Uses Bolt to store and process all Twitter mentions of GitHub projects. * [Scuttlebutt](https://github.com/benbjohnson/scuttlebutt) - Uses Bolt to store and process all Twitter mentions of GitHub projects.
* [Wiki](https://github.com/peterhellberg/wiki) - A tiny wiki using Goji, BoltDB and Blackfriday. * [Wiki](https://github.com/peterhellberg/wiki) - A tiny wiki using Goji, BoltDB and Blackfriday.
* [ChainStore](https://github.com/nulayer/chainstore) - Simple key-value interface to a variety of storage engines organized as a chain of operations. * [ChainStore](https://github.com/pressly/chainstore) - Simple key-value interface to a variety of storage engines organized as a chain of operations.
* [MetricBase](https://github.com/msiebuhr/MetricBase) - Single-binary version of Graphite. * [MetricBase](https://github.com/msiebuhr/MetricBase) - Single-binary version of Graphite.
* [Gitchain](https://github.com/gitchain/gitchain) - Decentralized, peer-to-peer Git repositories aka "Git meets Bitcoin". * [Gitchain](https://github.com/gitchain/gitchain) - Decentralized, peer-to-peer Git repositories aka "Git meets Bitcoin".
* [event-shuttle](https://github.com/sclasen/event-shuttle) - A Unix system service to collect and reliably deliver messages to Kafka. * [event-shuttle](https://github.com/sclasen/event-shuttle) - A Unix system service to collect and reliably deliver messages to Kafka.
* [ipxed](https://github.com/kelseyhightower/ipxed) - Web interface and api for ipxed. * [ipxed](https://github.com/kelseyhightower/ipxed) - Web interface and api for ipxed.
* [BoltStore](https://github.com/yosssi/boltstore) - Session store using Bolt. * [BoltStore](https://github.com/yosssi/boltstore) - Session store using Bolt.
* [photosite/session](http://godoc.org/bitbucket.org/kardianos/photosite/session) - Sessions for a photo viewing site. * [photosite/session](https://godoc.org/bitbucket.org/kardianos/photosite/session) - Sessions for a photo viewing site.
* [LedisDB](https://github.com/siddontang/ledisdb) - A high performance NoSQL, using Bolt as optional storage. * [LedisDB](https://github.com/siddontang/ledisdb) - A high performance NoSQL, using Bolt as optional storage.
* [ipLocator](https://github.com/AndreasBriese/ipLocator) - A fast ip-geo-location-server using bolt with bloom filters. * [ipLocator](https://github.com/AndreasBriese/ipLocator) - A fast ip-geo-location-server using bolt with bloom filters.
* [cayley](https://github.com/google/cayley) - Cayley is an open-source graph database using Bolt as optional backend. * [cayley](https://github.com/google/cayley) - Cayley is an open-source graph database using Bolt as optional backend.
* [bleve](http://www.blevesearch.com/) - A pure Go search engine similar to ElasticSearch that uses Bolt as the default storage backend. * [bleve](http://www.blevesearch.com/) - A pure Go search engine similar to ElasticSearch that uses Bolt as the default storage backend.
* [tentacool](https://github.com/optiflows/tentacool) - REST api server to manage system stuff (IP, DNS, Gateway...) on a linux server. * [tentacool](https://github.com/optiflows/tentacool) - REST api server to manage system stuff (IP, DNS, Gateway...) on a linux server.
* [SkyDB](https://github.com/skydb/sky) - Behavioral analytics database. * [SkyDB](https://github.com/skydb/sky) - Behavioral analytics database.
* [Seaweed File System](https://github.com/chrislusf/weed-fs) - Highly scalable distributed key~file system with O(1) disk read. * [Seaweed File System](https://github.com/chrislusf/seaweedfs) - Highly scalable distributed key~file system with O(1) disk read.
* [InfluxDB](http://influxdb.com) - Scalable datastore for metrics, events, and real-time analytics. * [InfluxDB](https://influxdata.com) - Scalable datastore for metrics, events, and real-time analytics.
* [Freehold](http://tshannon.bitbucket.org/freehold/) - An open, secure, and lightweight platform for your files and data. * [Freehold](http://tshannon.bitbucket.org/freehold/) - An open, secure, and lightweight platform for your files and data.
* [Prometheus Annotation Server](https://github.com/oliver006/prom_annotation_server) - Annotation server for PromDash & Prometheus service monitoring system. * [Prometheus Annotation Server](https://github.com/oliver006/prom_annotation_server) - Annotation server for PromDash & Prometheus service monitoring system.
* [Consul](https://github.com/hashicorp/consul) - Consul is service discovery and configuration made easy. Distributed, highly available, and datacenter-aware. * [Consul](https://github.com/hashicorp/consul) - Consul is service discovery and configuration made easy. Distributed, highly available, and datacenter-aware.
* [Kala](https://github.com/ajvb/kala) - Kala is a modern job scheduler optimized to run on a single node. It is persistant, JSON over HTTP API, ISO 8601 duration notation, and dependent jobs. * [Kala](https://github.com/ajvb/kala) - Kala is a modern job scheduler optimized to run on a single node. It is persistent, JSON over HTTP API, ISO 8601 duration notation, and dependent jobs.
* [drive](https://github.com/odeke-em/drive) - drive is an unofficial Google Drive command line client for \*NIX operating systems. * [drive](https://github.com/odeke-em/drive) - drive is an unofficial Google Drive command line client for \*NIX operating systems.
* [stow](https://github.com/djherbis/stow) - a persistence manager for objects
backed by boltdb.
* [buckets](https://github.com/joyrexus/buckets) - a bolt wrapper streamlining
simple tx and key scans.
* [Request Baskets](https://github.com/darklynx/request-baskets) - A web service to collect arbitrary HTTP requests and inspect them via REST API or simple web UI, similar to [RequestBin](http://requestb.in/) service
If you are using Bolt in a project please send a pull request to add it to the list. If you are using Bolt in a project please send a pull request to add it to the list.

View File

@@ -1,138 +0,0 @@
package bolt
import (
"errors"
"fmt"
"sync"
"time"
)
// Batch calls fn as part of a batch. It behaves similar to Update,
// except:
//
// 1. concurrent Batch calls can be combined into a single Bolt
// transaction.
//
// 2. the function passed to Batch may be called multiple times,
// regardless of whether it returns error or not.
//
// This means that Batch function side effects must be idempotent and
// take permanent effect only after a successful return is seen in
// caller.
//
// The maximum batch size and delay can be adjusted with DB.MaxBatchSize
// and DB.MaxBatchDelay, respectively.
//
// Batch is only useful when there are multiple goroutines calling it.
func (db *DB) Batch(fn func(*Tx) error) error {
	// Buffered so the batch goroutine can deliver the result even if this
	// caller is not yet blocked on the receive below.
	errCh := make(chan error, 1)

	db.batchMu.Lock()
	if (db.batch == nil) || (db.batch != nil && len(db.batch.calls) >= db.MaxBatchSize) {
		// There is no existing batch, or the existing batch is full; start a new one.
		db.batch = &batch{
			db: db,
		}
		// The timer guarantees the batch runs after MaxBatchDelay even if
		// it never fills up.
		db.batch.timer = time.AfterFunc(db.MaxBatchDelay, db.batch.trigger)
	}
	db.batch.calls = append(db.batch.calls, call{fn: fn, err: errCh})
	if len(db.batch.calls) >= db.MaxBatchSize {
		// wake up batch, it's ready to run
		go db.batch.trigger()
	}
	db.batchMu.Unlock()

	// Wait for the batch goroutine to report the outcome of fn.
	err := <-errCh
	if err == trySolo {
		// fn failed inside the shared transaction; re-run it in its own
		// transaction so it cannot poison the other calls in the batch.
		err = db.Update(fn)
	}
	return err
}
// call is one pending unit of work submitted through DB.Batch: the user
// function plus the channel on which its result is reported back.
type call struct {
	fn  func(*Tx) error
	err chan<- error
}
// batch accumulates calls that will be executed together inside a single
// read-write transaction.
type batch struct {
	db    *DB
	timer *time.Timer // fires after DB.MaxBatchDelay to run a partially-filled batch
	start sync.Once   // ensures run executes at most once per batch
	calls []call
}
// trigger runs the batch if it hasn't already been run.
// It can be invoked both by the delay timer and by a Batch call that fills
// the batch; the sync.Once makes the two races harmless.
func (b *batch) trigger() {
	b.start.Do(b.run)
}
// run performs the transactions in the batch and communicates results
// back to DB.Batch.
func (b *batch) run() {
	b.db.batchMu.Lock()
	b.timer.Stop()
	// Make sure no new work is added to this batch, but don't break
	// other batches.
	if b.db.batch == b {
		b.db.batch = nil
	}
	b.db.batchMu.Unlock()

retry:
	for len(b.calls) > 0 {
		// Index of the call that failed inside the transaction, if any.
		var failIdx = -1
		err := b.db.Update(func(tx *Tx) error {
			for i, c := range b.calls {
				if err := safelyCall(c.fn, tx); err != nil {
					failIdx = i
					return err
				}
			}
			return nil
		})

		if failIdx >= 0 {
			// take the failing transaction out of the batch. it's
			// safe to shorten b.calls here because db.batch no longer
			// points to us, and we hold the mutex anyway.
			// (swap-with-last removal; order of calls is not significant)
			c := b.calls[failIdx]
			b.calls[failIdx], b.calls = b.calls[len(b.calls)-1], b.calls[:len(b.calls)-1]
			// tell the submitter re-run it solo, continue with the rest of the batch
			c.err <- trySolo
			continue retry
		}

		// pass success, or bolt internal errors, to all callers
		for _, c := range b.calls {
			if c.err != nil {
				c.err <- err
			}
		}
		break retry
	}
}
// trySolo is a special sentinel error value used for signaling that a
// transaction function should be re-run. It should never be seen by
// callers. DB.Batch compares against it directly after receiving a result.
var trySolo = errors.New("batch function returned an error and should be re-run solo")
// panicked wraps a value recovered from a panic so that it can be
// propagated through the error return of a batched call.
type panicked struct {
	reason interface{}
}

// Error renders the recovered panic value as an error string. A reason
// that already implements error is reported via its own Error method;
// anything else is formatted with a "panic:" prefix.
func (p panicked) Error() string {
	switch r := p.reason.(type) {
	case error:
		return r.Error()
	default:
		return fmt.Sprintf("panic: %v", r)
	}
}
// safelyCall invokes fn with tx, converting any panic raised by fn into
// a panicked error instead of letting it unwind past the batch runner.
func safelyCall(fn func(*Tx) error, tx *Tx) (err error) {
	defer func() {
		if r := recover(); r != nil {
			err = panicked{reason: r}
		}
	}()
	err = fn(tx)
	return err
}

View File

@@ -0,0 +1,9 @@
// +build arm64

package bolt

// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0xFFFFFFFFFFFF // 256TB

// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF

View File

@@ -4,8 +4,6 @@ import (
"syscall" "syscall"
) )
var odirect = syscall.O_DIRECT
// fdatasync flushes written data to a file descriptor. // fdatasync flushes written data to a file descriptor.
func fdatasync(db *DB) error { func fdatasync(db *DB) error {
return syscall.Fdatasync(int(db.file.Fd())) return syscall.Fdatasync(int(db.file.Fd()))

View File

@@ -11,8 +11,6 @@ const (
msInvalidate // invalidate cached data msInvalidate // invalidate cached data
) )
var odirect int
func msync(db *DB) error { func msync(db *DB) error {
_, _, errno := syscall.Syscall(syscall.SYS_MSYNC, uintptr(unsafe.Pointer(db.data)), uintptr(db.datasz), msInvalidate) _, _, errno := syscall.Syscall(syscall.SYS_MSYNC, uintptr(unsafe.Pointer(db.data)), uintptr(db.datasz), msInvalidate)
if errno != 0 { if errno != 0 {

View File

@@ -0,0 +1,9 @@
// +build ppc64le

package bolt

// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0xFFFFFFFFFFFF // 256TB

// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF

View File

@@ -0,0 +1,9 @@
// +build s390x

package bolt

// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0xFFFFFFFFFFFF // 256TB

// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF

View File

@@ -46,19 +46,8 @@ func funlock(f *os.File) error {
// mmap memory maps a DB's data file. // mmap memory maps a DB's data file.
func mmap(db *DB, sz int) error { func mmap(db *DB, sz int) error {
// Truncate and fsync to ensure file size metadata is flushed.
// https://github.com/boltdb/bolt/issues/284
if !db.NoGrowSync && !db.readOnly {
if err := db.file.Truncate(int64(sz)); err != nil {
return fmt.Errorf("file resize error: %s", err)
}
if err := db.file.Sync(); err != nil {
return fmt.Errorf("file sync error: %s", err)
}
}
// Map the data file to memory. // Map the data file to memory.
b, err := syscall.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED) b, err := syscall.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags)
if err != nil { if err != nil {
return err return err
} }

View File

@@ -1,4 +1,3 @@
package bolt package bolt
import ( import (
@@ -7,6 +6,7 @@ import (
"syscall" "syscall"
"time" "time"
"unsafe" "unsafe"
"golang.org/x/sys/unix" "golang.org/x/sys/unix"
) )
@@ -56,19 +56,8 @@ func funlock(f *os.File) error {
// mmap memory maps a DB's data file. // mmap memory maps a DB's data file.
func mmap(db *DB, sz int) error { func mmap(db *DB, sz int) error {
// Truncate and fsync to ensure file size metadata is flushed.
// https://github.com/boltdb/bolt/issues/284
if !db.NoGrowSync && !db.readOnly {
if err := db.file.Truncate(int64(sz)); err != nil {
return fmt.Errorf("file resize error: %s", err)
}
if err := db.file.Sync(); err != nil {
return fmt.Errorf("file sync error: %s", err)
}
}
// Map the data file to memory. // Map the data file to memory.
b, err := unix.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED) b, err := unix.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags)
if err != nil { if err != nil {
return err return err
} }

View File

@@ -8,7 +8,37 @@ import (
"unsafe" "unsafe"
) )
var odirect int // LockFileEx code derived from golang build filemutex_windows.go @ v1.5.1
// Lazily-loaded kernel32 procedures used to implement advisory file
// locking on Windows via LockFileEx/UnlockFileEx.
var (
	modkernel32      = syscall.NewLazyDLL("kernel32.dll")
	procLockFileEx   = modkernel32.NewProc("LockFileEx")
	procUnlockFileEx = modkernel32.NewProc("UnlockFileEx")
)
// Flag and errno values for the LockFileEx Windows API.
const (
	// see https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx
	flagLockExclusive       = 2
	flagLockFailImmediately = 1

	// see https://msdn.microsoft.com/en-us/library/windows/desktop/ms681382(v=vs.85).aspx
	errLockViolation syscall.Errno = 0x21
)
// lockFileEx calls the Windows LockFileEx API. The syscall returns a
// zero result on failure, in which case the errno-derived error from
// Call is returned; a nonzero result means the lock was acquired.
func lockFileEx(h syscall.Handle, flags, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) {
	r, _, err := procLockFileEx.Call(uintptr(h), uintptr(flags), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol)))
	if r == 0 {
		return err
	}
	return nil
}
// unlockFileEx calls the Windows UnlockFileEx API. As with lockFileEx,
// a zero result indicates failure and the errno-derived error from
// Call is returned.
func unlockFileEx(h syscall.Handle, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) {
	r, _, err := procUnlockFileEx.Call(uintptr(h), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol)), 0)
	if r == 0 {
		return err
	}
	return nil
}
// fdatasync flushes written data to a file descriptor. // fdatasync flushes written data to a file descriptor.
func fdatasync(db *DB) error { func fdatasync(db *DB) error {
@@ -16,13 +46,37 @@ func fdatasync(db *DB) error {
} }
// flock acquires an advisory lock on a file descriptor. // flock acquires an advisory lock on a file descriptor.
func flock(f *os.File, _ bool, _ time.Duration) error { func flock(f *os.File, exclusive bool, timeout time.Duration) error {
return nil var t time.Time
for {
// If we're beyond our timeout then return an error.
// This can only occur after we've attempted a flock once.
if t.IsZero() {
t = time.Now()
} else if timeout > 0 && time.Since(t) > timeout {
return ErrTimeout
}
var flag uint32 = flagLockFailImmediately
if exclusive {
flag |= flagLockExclusive
}
err := lockFileEx(syscall.Handle(f.Fd()), flag, 0, 1, 0, &syscall.Overlapped{})
if err == nil {
return nil
} else if err != errLockViolation {
return err
}
// Wait for a bit and try again.
time.Sleep(50 * time.Millisecond)
}
} }
// funlock releases an advisory lock on a file descriptor. // funlock releases an advisory lock on a file descriptor.
func funlock(f *os.File) error { func funlock(f *os.File) error {
return nil return unlockFileEx(syscall.Handle(f.Fd()), 0, 1, 0, &syscall.Overlapped{})
} }
// mmap memory maps a DB's data file. // mmap memory maps a DB's data file.

View File

@@ -2,8 +2,6 @@
package bolt package bolt
var odirect int
// fdatasync flushes written data to a file descriptor. // fdatasync flushes written data to a file descriptor.
func fdatasync(db *DB) error { func fdatasync(db *DB) error {
return db.file.Sync() return db.file.Sync()

View File

@@ -11,7 +11,7 @@ const (
MaxKeySize = 32768 MaxKeySize = 32768
// MaxValueSize is the maximum length of a value, in bytes. // MaxValueSize is the maximum length of a value, in bytes.
MaxValueSize = 4294967295 MaxValueSize = (1 << 31) - 2
) )
const ( const (
@@ -99,6 +99,7 @@ func (b *Bucket) Cursor() *Cursor {
// Bucket retrieves a nested bucket by name. // Bucket retrieves a nested bucket by name.
// Returns nil if the bucket does not exist. // Returns nil if the bucket does not exist.
// The bucket instance is only valid for the lifetime of the transaction.
func (b *Bucket) Bucket(name []byte) *Bucket { func (b *Bucket) Bucket(name []byte) *Bucket {
if b.buckets != nil { if b.buckets != nil {
if child := b.buckets[string(name)]; child != nil { if child := b.buckets[string(name)]; child != nil {
@@ -148,6 +149,7 @@ func (b *Bucket) openBucket(value []byte) *Bucket {
// CreateBucket creates a new bucket at the given key and returns the new bucket. // CreateBucket creates a new bucket at the given key and returns the new bucket.
// Returns an error if the key already exists, if the bucket name is blank, or if the bucket name is too long. // Returns an error if the key already exists, if the bucket name is blank, or if the bucket name is too long.
// The bucket instance is only valid for the lifetime of the transaction.
func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) { func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) {
if b.tx.db == nil { if b.tx.db == nil {
return nil, ErrTxClosed return nil, ErrTxClosed
@@ -192,6 +194,7 @@ func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) {
// CreateBucketIfNotExists creates a new bucket if it doesn't already exist and returns a reference to it. // CreateBucketIfNotExists creates a new bucket if it doesn't already exist and returns a reference to it.
// Returns an error if the bucket name is blank, or if the bucket name is too long. // Returns an error if the bucket name is blank, or if the bucket name is too long.
// The bucket instance is only valid for the lifetime of the transaction.
func (b *Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) { func (b *Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) {
child, err := b.CreateBucket(key) child, err := b.CreateBucket(key)
if err == ErrBucketExists { if err == ErrBucketExists {
@@ -270,6 +273,7 @@ func (b *Bucket) Get(key []byte) []byte {
// Put sets the value for a key in the bucket. // Put sets the value for a key in the bucket.
// If the key exist then its previous value will be overwritten. // If the key exist then its previous value will be overwritten.
// Supplied value must remain valid for the life of the transaction.
// Returns an error if the bucket was created from a read-only transaction, if the key is blank, if the key is too large, or if the value is too large. // Returns an error if the bucket was created from a read-only transaction, if the key is blank, if the key is too large, or if the value is too large.
func (b *Bucket) Put(key []byte, value []byte) error { func (b *Bucket) Put(key []byte, value []byte) error {
if b.tx.db == nil { if b.tx.db == nil {
@@ -346,7 +350,8 @@ func (b *Bucket) NextSequence() (uint64, error) {
// ForEach executes a function for each key/value pair in a bucket. // ForEach executes a function for each key/value pair in a bucket.
// If the provided function returns an error then the iteration is stopped and // If the provided function returns an error then the iteration is stopped and
// the error is returned to the caller. // the error is returned to the caller. The provided function must not modify
// the bucket; this will result in undefined behavior.
func (b *Bucket) ForEach(fn func(k, v []byte) error) error { func (b *Bucket) ForEach(fn func(k, v []byte) error) error {
if b.tx.db == nil { if b.tx.db == nil {
return ErrTxClosed return ErrTxClosed

View File

@@ -825,7 +825,10 @@ func (cmd *StatsCommand) Run(args ...string) error {
fmt.Fprintln(cmd.Stdout, "Bucket statistics") fmt.Fprintln(cmd.Stdout, "Bucket statistics")
fmt.Fprintf(cmd.Stdout, "\tTotal number of buckets: %d\n", s.BucketN) fmt.Fprintf(cmd.Stdout, "\tTotal number of buckets: %d\n", s.BucketN)
percentage = int(float32(s.InlineBucketN) * 100.0 / float32(s.BucketN)) percentage = 0
if s.BucketN != 0 {
percentage = int(float32(s.InlineBucketN) * 100.0 / float32(s.BucketN))
}
fmt.Fprintf(cmd.Stdout, "\tTotal number on inlined buckets: %d (%d%%)\n", s.InlineBucketN, percentage) fmt.Fprintf(cmd.Stdout, "\tTotal number on inlined buckets: %d (%d%%)\n", s.InlineBucketN, percentage)
percentage = 0 percentage = 0
if s.LeafInuse != 0 { if s.LeafInuse != 0 {

View File

@@ -34,6 +34,13 @@ func (c *Cursor) First() (key []byte, value []byte) {
p, n := c.bucket.pageNode(c.bucket.root) p, n := c.bucket.pageNode(c.bucket.root)
c.stack = append(c.stack, elemRef{page: p, node: n, index: 0}) c.stack = append(c.stack, elemRef{page: p, node: n, index: 0})
c.first() c.first()
// If we land on an empty page then move to the next value.
// https://github.com/boltdb/bolt/issues/450
if c.stack[len(c.stack)-1].count() == 0 {
c.next()
}
k, v, flags := c.keyValue() k, v, flags := c.keyValue()
if (flags & uint32(bucketLeafFlag)) != 0 { if (flags & uint32(bucketLeafFlag)) != 0 {
return k, nil return k, nil
@@ -209,28 +216,37 @@ func (c *Cursor) last() {
// next moves to the next leaf element and returns the key and value. // next moves to the next leaf element and returns the key and value.
// If the cursor is at the last leaf element then it stays there and returns nil. // If the cursor is at the last leaf element then it stays there and returns nil.
func (c *Cursor) next() (key []byte, value []byte, flags uint32) { func (c *Cursor) next() (key []byte, value []byte, flags uint32) {
// Attempt to move over one element until we're successful. for {
// Move up the stack as we hit the end of each page in our stack. // Attempt to move over one element until we're successful.
var i int // Move up the stack as we hit the end of each page in our stack.
for i = len(c.stack) - 1; i >= 0; i-- { var i int
elem := &c.stack[i] for i = len(c.stack) - 1; i >= 0; i-- {
if elem.index < elem.count()-1 { elem := &c.stack[i]
elem.index++ if elem.index < elem.count()-1 {
break elem.index++
break
}
} }
}
// If we've hit the root page then stop and return. This will leave the // If we've hit the root page then stop and return. This will leave the
// cursor on the last element of the last page. // cursor on the last element of the last page.
if i == -1 { if i == -1 {
return nil, nil, 0 return nil, nil, 0
} }
// Otherwise start from where we left off in the stack and find the // Otherwise start from where we left off in the stack and find the
// first element of the first leaf page. // first element of the first leaf page.
c.stack = c.stack[:i+1] c.stack = c.stack[:i+1]
c.first() c.first()
return c.keyValue()
// If this is an empty page then restart and move back up the stack.
// https://github.com/boltdb/bolt/issues/450
if c.stack[len(c.stack)-1].count() == 0 {
continue
}
return c.keyValue()
}
} }
// search recursively performs a binary search against a given page/node until it finds a given key. // search recursively performs a binary search against a given page/node until it finds a given key.

View File

@@ -1,8 +1,10 @@
package bolt package bolt
import ( import (
"errors"
"fmt" "fmt"
"hash/fnv" "hash/fnv"
"log"
"os" "os"
"runtime" "runtime"
"runtime/debug" "runtime/debug"
@@ -24,13 +26,14 @@ const magic uint32 = 0xED0CDAED
// IgnoreNoSync specifies whether the NoSync field of a DB is ignored when // IgnoreNoSync specifies whether the NoSync field of a DB is ignored when
// syncing changes to a file. This is required as some operating systems, // syncing changes to a file. This is required as some operating systems,
// such as OpenBSD, do not have a unified buffer cache (UBC) and writes // such as OpenBSD, do not have a unified buffer cache (UBC) and writes
// must be synchronzied using the msync(2) syscall. // must be synchronized using the msync(2) syscall.
const IgnoreNoSync = runtime.GOOS == "openbsd" const IgnoreNoSync = runtime.GOOS == "openbsd"
// Default values if not set in a DB instance. // Default values if not set in a DB instance.
const ( const (
DefaultMaxBatchSize int = 1000 DefaultMaxBatchSize int = 1000
DefaultMaxBatchDelay = 10 * time.Millisecond DefaultMaxBatchDelay = 10 * time.Millisecond
DefaultAllocSize = 16 * 1024 * 1024
) )
// DB represents a collection of buckets persisted to a file on disk. // DB represents a collection of buckets persisted to a file on disk.
@@ -63,6 +66,10 @@ type DB struct {
// https://github.com/boltdb/bolt/issues/284 // https://github.com/boltdb/bolt/issues/284
NoGrowSync bool NoGrowSync bool
// If you want to read the entire database fast, you can set MmapFlag to
// syscall.MAP_POPULATE on Linux 2.6.23+ for sequential read-ahead.
MmapFlags int
// MaxBatchSize is the maximum size of a batch. Default value is // MaxBatchSize is the maximum size of a batch. Default value is
// copied from DefaultMaxBatchSize in Open. // copied from DefaultMaxBatchSize in Open.
// //
@@ -79,11 +86,17 @@ type DB struct {
// Do not change concurrently with calls to Batch. // Do not change concurrently with calls to Batch.
MaxBatchDelay time.Duration MaxBatchDelay time.Duration
// AllocSize is the amount of space allocated when the database
// needs to create new pages. This is done to amortize the cost
// of truncate() and fsync() when growing the data file.
AllocSize int
path string path string
file *os.File file *os.File
dataref []byte // mmap'ed readonly, write throws SEGV dataref []byte // mmap'ed readonly, write throws SEGV
data *[maxMapSize]byte data *[maxMapSize]byte
datasz int datasz int
filesz int // current on disk file size
meta0 *meta meta0 *meta
meta1 *meta meta1 *meta
pageSize int pageSize int
@@ -136,10 +149,12 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) {
options = DefaultOptions options = DefaultOptions
} }
db.NoGrowSync = options.NoGrowSync db.NoGrowSync = options.NoGrowSync
db.MmapFlags = options.MmapFlags
// Set default values for later DB operations. // Set default values for later DB operations.
db.MaxBatchSize = DefaultMaxBatchSize db.MaxBatchSize = DefaultMaxBatchSize
db.MaxBatchDelay = DefaultMaxBatchDelay db.MaxBatchDelay = DefaultMaxBatchDelay
db.AllocSize = DefaultAllocSize
flag := os.O_RDWR flag := os.O_RDWR
if options.ReadOnly { if options.ReadOnly {
@@ -172,7 +187,7 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) {
// Initialize the database if it doesn't exist. // Initialize the database if it doesn't exist.
if info, err := db.file.Stat(); err != nil { if info, err := db.file.Stat(); err != nil {
return nil, fmt.Errorf("stat error: %s", err) return nil, err
} else if info.Size() == 0 { } else if info.Size() == 0 {
// Initialize new files with meta pages. // Initialize new files with meta pages.
if err := db.init(); err != nil { if err := db.init(); err != nil {
@@ -184,14 +199,14 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) {
if _, err := db.file.ReadAt(buf[:], 0); err == nil { if _, err := db.file.ReadAt(buf[:], 0); err == nil {
m := db.pageInBuffer(buf[:], 0).meta() m := db.pageInBuffer(buf[:], 0).meta()
if err := m.validate(); err != nil { if err := m.validate(); err != nil {
return nil, fmt.Errorf("meta0 error: %s", err) return nil, err
} }
db.pageSize = int(m.pageSize) db.pageSize = int(m.pageSize)
} }
} }
// Memory map the data file. // Memory map the data file.
if err := db.mmap(0); err != nil { if err := db.mmap(options.InitialMmapSize); err != nil {
_ = db.close() _ = db.close()
return nil, err return nil, err
} }
@@ -248,10 +263,10 @@ func (db *DB) mmap(minsz int) error {
// Validate the meta pages. // Validate the meta pages.
if err := db.meta0.validate(); err != nil { if err := db.meta0.validate(); err != nil {
return fmt.Errorf("meta0 error: %s", err) return err
} }
if err := db.meta1.validate(); err != nil { if err := db.meta1.validate(); err != nil {
return fmt.Errorf("meta1 error: %s", err) return err
} }
return nil return nil
@@ -266,7 +281,7 @@ func (db *DB) munmap() error {
} }
// mmapSize determines the appropriate size for the mmap given the current size // mmapSize determines the appropriate size for the mmap given the current size
// of the database. The minimum size is 1MB and doubles until it reaches 1GB. // of the database. The minimum size is 32KB and doubles until it reaches 1GB.
// Returns an error if the new mmap size is greater than the max allowed. // Returns an error if the new mmap size is greater than the max allowed.
func (db *DB) mmapSize(size int) (int, error) { func (db *DB) mmapSize(size int) (int, error) {
// Double the size from 32KB until 1GB. // Double the size from 32KB until 1GB.
@@ -382,7 +397,9 @@ func (db *DB) close() error {
// No need to unlock read-only file. // No need to unlock read-only file.
if !db.readOnly { if !db.readOnly {
// Unlock the file. // Unlock the file.
_ = funlock(db.file) if err := funlock(db.file); err != nil {
log.Printf("bolt.Close(): funlock error: %s", err)
}
} }
// Close the file descriptor. // Close the file descriptor.
@@ -401,11 +418,15 @@ func (db *DB) close() error {
// will cause the calls to block and be serialized until the current write // will cause the calls to block and be serialized until the current write
// transaction finishes. // transaction finishes.
// //
// Transactions should not be depedent on one another. Opening a read // Transactions should not be dependent on one another. Opening a read
// transaction and a write transaction in the same goroutine can cause the // transaction and a write transaction in the same goroutine can cause the
// writer to deadlock because the database periodically needs to re-mmap itself // writer to deadlock because the database periodically needs to re-mmap itself
// as it grows and it cannot do that while a read transaction is open. // as it grows and it cannot do that while a read transaction is open.
// //
// If a long running read transaction (for example, a snapshot transaction) is
// needed, you might want to set DB.InitialMmapSize to a large enough value
// to avoid potential blocking of write transaction.
//
// IMPORTANT: You must close read-only transactions after you are finished or // IMPORTANT: You must close read-only transactions after you are finished or
// else the database will not reclaim old pages. // else the database will not reclaim old pages.
func (db *DB) Begin(writable bool) (*Tx, error) { func (db *DB) Begin(writable bool) (*Tx, error) {
@@ -589,6 +610,136 @@ func (db *DB) View(fn func(*Tx) error) error {
return nil return nil
} }
// Batch calls fn as part of a batch. It behaves similar to Update,
// except:
//
// 1. concurrent Batch calls can be combined into a single Bolt
// transaction.
//
// 2. the function passed to Batch may be called multiple times,
// regardless of whether it returns error or not.
//
// This means that Batch function side effects must be idempotent and
// take permanent effect only after a successful return is seen in
// caller.
//
// The maximum batch size and delay can be adjusted with DB.MaxBatchSize
// and DB.MaxBatchDelay, respectively.
//
// Batch is only useful when there are multiple goroutines calling it.
func (db *DB) Batch(fn func(*Tx) error) error {
errCh := make(chan error, 1)
db.batchMu.Lock()
if (db.batch == nil) || (db.batch != nil && len(db.batch.calls) >= db.MaxBatchSize) {
// There is no existing batch, or the existing batch is full; start a new one.
db.batch = &batch{
db: db,
}
db.batch.timer = time.AfterFunc(db.MaxBatchDelay, db.batch.trigger)
}
db.batch.calls = append(db.batch.calls, call{fn: fn, err: errCh})
if len(db.batch.calls) >= db.MaxBatchSize {
// wake up batch, it's ready to run
go db.batch.trigger()
}
db.batchMu.Unlock()
err := <-errCh
if err == trySolo {
err = db.Update(fn)
}
return err
}
type call struct {
fn func(*Tx) error
err chan<- error
}
type batch struct {
db *DB
timer *time.Timer
start sync.Once
calls []call
}
// trigger runs the batch if it hasn't already been run.
func (b *batch) trigger() {
b.start.Do(b.run)
}
// run performs the transactions in the batch and communicates results
// back to DB.Batch.
func (b *batch) run() {
b.db.batchMu.Lock()
b.timer.Stop()
// Make sure no new work is added to this batch, but don't break
// other batches.
if b.db.batch == b {
b.db.batch = nil
}
b.db.batchMu.Unlock()
retry:
for len(b.calls) > 0 {
var failIdx = -1
err := b.db.Update(func(tx *Tx) error {
for i, c := range b.calls {
if err := safelyCall(c.fn, tx); err != nil {
failIdx = i
return err
}
}
return nil
})
if failIdx >= 0 {
// take the failing transaction out of the batch. it's
// safe to shorten b.calls here because db.batch no longer
// points to us, and we hold the mutex anyway.
c := b.calls[failIdx]
b.calls[failIdx], b.calls = b.calls[len(b.calls)-1], b.calls[:len(b.calls)-1]
// tell the submitter re-run it solo, continue with the rest of the batch
c.err <- trySolo
continue retry
}
// pass success, or bolt internal errors, to all callers
for _, c := range b.calls {
if c.err != nil {
c.err <- err
}
}
break retry
}
}
// trySolo is a special sentinel error value used for signaling that a
// transaction function should be re-run. It should never be seen by
// callers.
var trySolo = errors.New("batch function returned an error and should be re-run solo")
type panicked struct {
reason interface{}
}
func (p panicked) Error() string {
if err, ok := p.reason.(error); ok {
return err.Error()
}
return fmt.Sprintf("panic: %v", p.reason)
}
func safelyCall(fn func(*Tx) error, tx *Tx) (err error) {
defer func() {
if p := recover(); p != nil {
err = panicked{p}
}
}()
return fn(tx)
}
// Sync executes fdatasync() against the database file handle. // Sync executes fdatasync() against the database file handle.
// //
// This is not necessary under normal operation, however, if you use NoSync // This is not necessary under normal operation, however, if you use NoSync
@@ -655,6 +806,36 @@ func (db *DB) allocate(count int) (*page, error) {
return p, nil return p, nil
} }
// grow grows the size of the database to the given sz.
func (db *DB) grow(sz int) error {
// Ignore if the new size is less than available file size.
if sz <= db.filesz {
return nil
}
// If the data is smaller than the alloc size then only allocate what's needed.
// Once it goes over the allocation size then allocate in chunks.
if db.datasz < db.AllocSize {
sz = db.datasz
} else {
sz += db.AllocSize
}
// Truncate and fsync to ensure file size metadata is flushed.
// https://github.com/boltdb/bolt/issues/284
if !db.NoGrowSync && !db.readOnly {
if err := db.file.Truncate(int64(sz)); err != nil {
return fmt.Errorf("file resize error: %s", err)
}
if err := db.file.Sync(); err != nil {
return fmt.Errorf("file sync error: %s", err)
}
}
db.filesz = sz
return nil
}
func (db *DB) IsReadOnly() bool { func (db *DB) IsReadOnly() bool {
return db.readOnly return db.readOnly
} }
@@ -672,6 +853,19 @@ type Options struct {
// Open database in read-only mode. Uses flock(..., LOCK_SH |LOCK_NB) to // Open database in read-only mode. Uses flock(..., LOCK_SH |LOCK_NB) to
// grab a shared lock (UNIX). // grab a shared lock (UNIX).
ReadOnly bool ReadOnly bool
// Sets the DB.MmapFlags flag before memory mapping the file.
MmapFlags int
// InitialMmapSize is the initial mmap size of the database
// in bytes. Read transactions won't block write transaction
// if the InitialMmapSize is large enough to hold database mmap
// size. (See DB.Begin for more information)
//
// If <=0, the initial map size is 0.
// If initialMmapSize is smaller than the previous database size,
// it takes no effect.
InitialMmapSize int
} }
// DefaultOptions represent the options used if nil options are passed into Open(). // DefaultOptions represent the options used if nil options are passed into Open().

View File

@@ -29,6 +29,14 @@ type Tx struct {
pages map[pgid]*page pages map[pgid]*page
stats TxStats stats TxStats
commitHandlers []func() commitHandlers []func()
// WriteFlag specifies the flag for write-related methods like WriteTo().
// Tx opens the database file with the specified flag to copy the data.
//
// By default, the flag is unset, which works well for mostly in-memory
// workloads. For databases that are much larger than available RAM,
// set the flag to syscall.O_DIRECT to avoid trashing the page cache.
WriteFlag int
} }
// init initializes the transaction. // init initializes the transaction.
@@ -87,18 +95,21 @@ func (tx *Tx) Stats() TxStats {
// Bucket retrieves a bucket by name. // Bucket retrieves a bucket by name.
// Returns nil if the bucket does not exist. // Returns nil if the bucket does not exist.
// The bucket instance is only valid for the lifetime of the transaction.
func (tx *Tx) Bucket(name []byte) *Bucket { func (tx *Tx) Bucket(name []byte) *Bucket {
return tx.root.Bucket(name) return tx.root.Bucket(name)
} }
// CreateBucket creates a new bucket. // CreateBucket creates a new bucket.
// Returns an error if the bucket already exists, if the bucket name is blank, or if the bucket name is too long. // Returns an error if the bucket already exists, if the bucket name is blank, or if the bucket name is too long.
// The bucket instance is only valid for the lifetime of the transaction.
func (tx *Tx) CreateBucket(name []byte) (*Bucket, error) { func (tx *Tx) CreateBucket(name []byte) (*Bucket, error) {
return tx.root.CreateBucket(name) return tx.root.CreateBucket(name)
} }
// CreateBucketIfNotExists creates a new bucket if it doesn't already exist. // CreateBucketIfNotExists creates a new bucket if it doesn't already exist.
// Returns an error if the bucket name is blank, or if the bucket name is too long. // Returns an error if the bucket name is blank, or if the bucket name is too long.
// The bucket instance is only valid for the lifetime of the transaction.
func (tx *Tx) CreateBucketIfNotExists(name []byte) (*Bucket, error) { func (tx *Tx) CreateBucketIfNotExists(name []byte) (*Bucket, error) {
return tx.root.CreateBucketIfNotExists(name) return tx.root.CreateBucketIfNotExists(name)
} }
@@ -157,6 +168,8 @@ func (tx *Tx) Commit() error {
// Free the old root bucket. // Free the old root bucket.
tx.meta.root.root = tx.root.root tx.meta.root.root = tx.root.root
opgid := tx.meta.pgid
// Free the freelist and allocate new pages for it. This will overestimate // Free the freelist and allocate new pages for it. This will overestimate
// the size of the freelist but not underestimate the size (which would be bad). // the size of the freelist but not underestimate the size (which would be bad).
tx.db.freelist.free(tx.meta.txid, tx.db.page(tx.meta.freelist)) tx.db.freelist.free(tx.meta.txid, tx.db.page(tx.meta.freelist))
@@ -171,6 +184,14 @@ func (tx *Tx) Commit() error {
} }
tx.meta.freelist = p.id tx.meta.freelist = p.id
// If the high water mark has moved up then attempt to grow the database.
if tx.meta.pgid > opgid {
if err := tx.db.grow(int(tx.meta.pgid+1) * tx.db.pageSize); err != nil {
tx.rollback()
return err
}
}
// Write dirty pages to disk. // Write dirty pages to disk.
startTime = time.Now() startTime = time.Now()
if err := tx.write(); err != nil { if err := tx.write(); err != nil {
@@ -236,7 +257,8 @@ func (tx *Tx) close() {
var freelistPendingN = tx.db.freelist.pending_count() var freelistPendingN = tx.db.freelist.pending_count()
var freelistAlloc = tx.db.freelist.size() var freelistAlloc = tx.db.freelist.size()
// Remove writer lock. // Remove transaction ref & writer lock.
tx.db.rwtx = nil
tx.db.rwlock.Unlock() tx.db.rwlock.Unlock()
// Merge statistics. // Merge statistics.
@@ -250,11 +272,16 @@ func (tx *Tx) close() {
} else { } else {
tx.db.removeTx(tx) tx.db.removeTx(tx)
} }
// Clear all references.
tx.db = nil tx.db = nil
tx.meta = nil
tx.root = Bucket{tx: tx}
tx.pages = nil
} }
// Copy writes the entire database to a writer. // Copy writes the entire database to a writer.
// This function exists for backwards compatibility. Use WriteTo() in // This function exists for backwards compatibility. Use WriteTo() instead.
func (tx *Tx) Copy(w io.Writer) error { func (tx *Tx) Copy(w io.Writer) error {
_, err := tx.WriteTo(w) _, err := tx.WriteTo(w)
return err return err
@@ -263,21 +290,18 @@ func (tx *Tx) Copy(w io.Writer) error {
// WriteTo writes the entire database to a writer. // WriteTo writes the entire database to a writer.
// If err == nil then exactly tx.Size() bytes will be written into the writer. // If err == nil then exactly tx.Size() bytes will be written into the writer.
func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) { func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) {
// Attempt to open reader directly. // Attempt to open reader with WriteFlag
var f *os.File f, err := os.OpenFile(tx.db.path, os.O_RDONLY|tx.WriteFlag, 0)
if f, err = os.OpenFile(tx.db.path, os.O_RDONLY|odirect, 0); err != nil { if err != nil {
// Fallback to a regular open if that doesn't work. return 0, err
if f, err = os.OpenFile(tx.db.path, os.O_RDONLY, 0); err != nil {
return 0, err
}
} }
defer func() { _ = f.Close() }()
// Copy the meta pages. // Copy the meta pages.
tx.db.metalock.Lock() tx.db.metalock.Lock()
n, err = io.CopyN(w, f, int64(tx.db.pageSize*2)) n, err = io.CopyN(w, f, int64(tx.db.pageSize*2))
tx.db.metalock.Unlock() tx.db.metalock.Unlock()
if err != nil { if err != nil {
_ = f.Close()
return n, fmt.Errorf("meta copy: %s", err) return n, fmt.Errorf("meta copy: %s", err)
} }
@@ -285,7 +309,6 @@ func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) {
wn, err := io.CopyN(w, f, tx.Size()-int64(tx.db.pageSize*2)) wn, err := io.CopyN(w, f, tx.Size()-int64(tx.db.pageSize*2))
n += wn n += wn
if err != nil { if err != nil {
_ = f.Close()
return n, err return n, err
} }
@@ -492,7 +515,7 @@ func (tx *Tx) writeMeta() error {
} }
// page returns a reference to the page with a given id. // page returns a reference to the page with a given id.
// If page has been written to then a temporary bufferred page is returned. // If page has been written to then a temporary buffered page is returned.
func (tx *Tx) page(id pgid) *page { func (tx *Tx) page(id pgid) *page {
// Check the dirty pages first. // Check the dirty pages first.
if tx.pages != nil { if tx.pages != nil {

View File

@@ -1 +0,0 @@
*~

View File

@@ -1,19 +0,0 @@
# This file is like Go's AUTHORS file: it lists Copyright holders.
# The list of humans who have contributd is in the CONTRIBUTORS file.
#
# To contribute to this project, because it will eventually be folded
# back in to Go itself, you need to submit a CLA:
#
# http://golang.org/doc/contribute.html#copyright
#
# Then you get added to CONTRIBUTORS and you or your company get added
# to the AUTHORS file.
Blake Mizerany <blake.mizerany@gmail.com> github=bmizerany
Daniel Morsing <daniel.morsing@gmail.com> github=DanielMorsing
Gabriel Aszalos <gabriel.aszalos@gmail.com> github=gbbr
Google, Inc.
Keith Rarick <kr@xph.us> github=kr
Matthew Keenan <tank.en.mate@gmail.com> <github@mattkeenan.net> github=mattkeenan
Matt Layher <mdlayher@gmail.com> github=mdlayher
Tatsuhiro Tsujikawa <tatsuhiro.t@gmail.com> github=tatsuhiro-t

View File

@@ -1,19 +0,0 @@
# This file is like Go's CONTRIBUTORS file: it lists humans.
# The list of copyright holders (which may be companies) are in the AUTHORS file.
#
# To contribute to this project, because it will eventually be folded
# back in to Go itself, you need to submit a CLA:
#
# http://golang.org/doc/contribute.html#copyright
#
# Then you get added to CONTRIBUTORS and you or your company get added
# to the AUTHORS file.
Blake Mizerany <blake.mizerany@gmail.com> github=bmizerany
Brad Fitzpatrick <bradfitz@golang.org> github=bradfitz
Daniel Morsing <daniel.morsing@gmail.com> github=DanielMorsing
Gabriel Aszalos <gabriel.aszalos@gmail.com> github=gbbr
Keith Rarick <kr@xph.us> github=kr
Matthew Keenan <tank.en.mate@gmail.com> <github@mattkeenan.net> github=mattkeenan
Matt Layher <mdlayher@gmail.com> github=mdlayher
Tatsuhiro Tsujikawa <tatsuhiro.t@gmail.com> github=tatsuhiro-t

View File

@@ -1,5 +0,0 @@
We only accept contributions from users who have gone through Go's
contribution process (signed a CLA).
Please acknowledge whether you have (and use the same email) if
sending a pull request.

View File

@@ -1,7 +0,0 @@
Copyright 2014 Google & the Go AUTHORS
Go AUTHORS are:
See https://code.google.com/p/go/source/browse/AUTHORS
Licensed under the terms of Go itself:
https://code.google.com/p/go/source/browse/LICENSE

View File

@@ -24,6 +24,7 @@ import (
"net/url" "net/url"
"reflect" "reflect"
"sort" "sort"
"strconv"
"sync" "sync"
"time" "time"
@@ -99,6 +100,8 @@ type Config struct {
// watch start. But if server is behind some kind of proxy, the response // watch start. But if server is behind some kind of proxy, the response
// header may be cached at proxy, and Client cannot rely on this behavior. // header may be cached at proxy, and Client cannot rely on this behavior.
// //
// Especially, wait request will ignore this timeout.
//
// One API call may send multiple requests to different etcd servers until it // One API call may send multiple requests to different etcd servers until it
// succeeds. Use context of the API to specify the overall timeout. // succeeds. Use context of the API to specify the overall timeout.
// //
@@ -162,6 +165,11 @@ type Client interface {
// this may differ from the initial Endpoints provided in the Config. // this may differ from the initial Endpoints provided in the Config.
Endpoints() []string Endpoints() []string
// SetEndpoints sets the set of API endpoints used by Client to resolve
// HTTP requests. If the given endpoints are not valid, an error will be
// returned
SetEndpoints(eps []string) error
httpClient httpClient
} }
@@ -176,7 +184,7 @@ func New(cfg Config) (Client, error) {
password: cfg.Password, password: cfg.Password,
} }
} }
if err := c.reset(cfg.Endpoints); err != nil { if err := c.SetEndpoints(cfg.Endpoints); err != nil {
return nil, err return nil, err
} }
return c, nil return c, nil
@@ -219,7 +227,7 @@ type httpClusterClient struct {
rand *rand.Rand rand *rand.Rand
} }
func (c *httpClusterClient) reset(eps []string) error { func (c *httpClusterClient) SetEndpoints(eps []string) error {
if len(eps) == 0 { if len(eps) == 0 {
return ErrNoEndpoints return ErrNoEndpoints
} }
@@ -341,7 +349,7 @@ func (c *httpClusterClient) Sync(ctx context.Context) error {
return nil return nil
} }
return c.reset(eps) return c.SetEndpoints(eps)
} }
func (c *httpClusterClient) AutoSync(ctx context.Context, interval time.Duration) error { func (c *httpClusterClient) AutoSync(ctx context.Context, interval time.Duration) error {
@@ -378,9 +386,21 @@ func (c *simpleHTTPClient) Do(ctx context.Context, act httpAction) (*http.Respon
return nil, nil, err return nil, nil, err
} }
isWait := false
if req != nil && req.URL != nil {
ws := req.URL.Query().Get("wait")
if len(ws) != 0 {
var err error
isWait, err = strconv.ParseBool(ws)
if err != nil {
return nil, nil, fmt.Errorf("wrong wait value %s (%v for %+v)", ws, err, req)
}
}
}
var hctx context.Context var hctx context.Context
var hcancel context.CancelFunc var hcancel context.CancelFunc
if c.headerTimeout > 0 { if !isWait && c.headerTimeout > 0 {
hctx, hcancel = context.WithTimeout(ctx, c.headerTimeout) hctx, hcancel = context.WithTimeout(ctx, c.headerTimeout)
} else { } else {
hctx, hcancel = context.WithCancel(ctx) hctx, hcancel = context.WithCancel(ctx)

View File

@@ -95,8 +95,7 @@ type store struct {
timeout time.Duration timeout time.Duration
ensuredOnce bool ensuredOnce bool
mu sync.Mutex // protect enabled mu sync.Mutex
enabled *bool
} }
type User struct { type User struct {
@@ -409,8 +408,6 @@ func (s *store) EnableAuth() error {
} }
err = s.enableAuth() err = s.enableAuth()
if err == nil { if err == nil {
b := true
s.enabled = &b
plog.Noticef("auth: enabled auth") plog.Noticef("auth: enabled auth")
} else { } else {
plog.Errorf("error enabling auth (%v)", err) plog.Errorf("error enabling auth (%v)", err)
@@ -428,8 +425,6 @@ func (s *store) DisableAuth() error {
err := s.disableAuth() err := s.disableAuth()
if err == nil { if err == nil {
b := false
s.enabled = &b
plog.Noticef("auth: disabled auth") plog.Noticef("auth: disabled auth")
} else { } else {
plog.Errorf("error disabling auth (%v)", err) plog.Errorf("error disabling auth (%v)", err)

View File

@@ -85,15 +85,10 @@ func (s *store) detectAuth() bool {
if s.server == nil { if s.server == nil {
return false return false
} }
if s.enabled != nil {
return *s.enabled
}
value, err := s.requestResource("/enabled", false) value, err := s.requestResource("/enabled", false)
if err != nil { if err != nil {
if e, ok := err.(*etcderr.Error); ok { if e, ok := err.(*etcderr.Error); ok {
if e.ErrorCode == etcderr.EcodeKeyNotFound { if e.ErrorCode == etcderr.EcodeKeyNotFound {
b := false
s.enabled = &b
return false return false
} }
} }
@@ -107,7 +102,6 @@ func (s *store) detectAuth() bool {
plog.Errorf("internal bookkeeping value for enabled isn't valid JSON (%v)", err) plog.Errorf("internal bookkeeping value for enabled isn't valid JSON (%v)", err)
return false return false
} }
s.enabled = &u
return u return u
} }

View File

@@ -13,38 +13,56 @@
It has these top-level messages: It has these top-level messages:
Request Request
Metadata Metadata
InternalRaftRequest
ResponseHeader
RangeRequest
RangeResponse
PutRequest
PutResponse
DeleteRangeRequest
DeleteRangeResponse
RequestUnion
ResponseUnion
Compare
TxnRequest
TxnResponse
CompactionRequest
CompactionResponse
*/ */
package etcdserverpb package etcdserverpb
import proto "github.com/gogo/protobuf/proto" import (
"fmt"
proto "github.com/gogo/protobuf/proto"
)
import math "math" import math "math"
// discarding unused import gogoproto "github.com/coreos/etcd/Godeps/_workspace/src/gogoproto"
import io "io" import io "io"
import fmt "fmt"
// Reference imports to suppress errors if they are not otherwise used. // Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf var _ = math.Inf
type Request struct { type Request struct {
ID uint64 `protobuf:"varint,1,opt" json:"ID"` ID uint64 `protobuf:"varint,1,opt,name=ID" json:"ID"`
Method string `protobuf:"bytes,2,opt" json:"Method"` Method string `protobuf:"bytes,2,opt,name=Method" json:"Method"`
Path string `protobuf:"bytes,3,opt" json:"Path"` Path string `protobuf:"bytes,3,opt,name=Path" json:"Path"`
Val string `protobuf:"bytes,4,opt" json:"Val"` Val string `protobuf:"bytes,4,opt,name=Val" json:"Val"`
Dir bool `protobuf:"varint,5,opt" json:"Dir"` Dir bool `protobuf:"varint,5,opt,name=Dir" json:"Dir"`
PrevValue string `protobuf:"bytes,6,opt" json:"PrevValue"` PrevValue string `protobuf:"bytes,6,opt,name=PrevValue" json:"PrevValue"`
PrevIndex uint64 `protobuf:"varint,7,opt" json:"PrevIndex"` PrevIndex uint64 `protobuf:"varint,7,opt,name=PrevIndex" json:"PrevIndex"`
PrevExist *bool `protobuf:"varint,8,opt" json:"PrevExist,omitempty"` PrevExist *bool `protobuf:"varint,8,opt,name=PrevExist" json:"PrevExist,omitempty"`
Expiration int64 `protobuf:"varint,9,opt" json:"Expiration"` Expiration int64 `protobuf:"varint,9,opt,name=Expiration" json:"Expiration"`
Wait bool `protobuf:"varint,10,opt" json:"Wait"` Wait bool `protobuf:"varint,10,opt,name=Wait" json:"Wait"`
Since uint64 `protobuf:"varint,11,opt" json:"Since"` Since uint64 `protobuf:"varint,11,opt,name=Since" json:"Since"`
Recursive bool `protobuf:"varint,12,opt" json:"Recursive"` Recursive bool `protobuf:"varint,12,opt,name=Recursive" json:"Recursive"`
Sorted bool `protobuf:"varint,13,opt" json:"Sorted"` Sorted bool `protobuf:"varint,13,opt,name=Sorted" json:"Sorted"`
Quorum bool `protobuf:"varint,14,opt" json:"Quorum"` Quorum bool `protobuf:"varint,14,opt,name=Quorum" json:"Quorum"`
Time int64 `protobuf:"varint,15,opt" json:"Time"` Time int64 `protobuf:"varint,15,opt,name=Time" json:"Time"`
Stream bool `protobuf:"varint,16,opt" json:"Stream"` Stream bool `protobuf:"varint,16,opt,name=Stream" json:"Stream"`
XXX_unrecognized []byte `json:"-"` XXX_unrecognized []byte `json:"-"`
} }
@@ -53,8 +71,8 @@ func (m *Request) String() string { return proto.CompactTextString(m) }
func (*Request) ProtoMessage() {} func (*Request) ProtoMessage() {}
type Metadata struct { type Metadata struct {
NodeID uint64 `protobuf:"varint,1,opt" json:"NodeID"` NodeID uint64 `protobuf:"varint,1,opt,name=NodeID" json:"NodeID"`
ClusterID uint64 `protobuf:"varint,2,opt" json:"ClusterID"` ClusterID uint64 `protobuf:"varint,2,opt,name=ClusterID" json:"ClusterID"`
XXX_unrecognized []byte `json:"-"` XXX_unrecognized []byte `json:"-"`
} }
@@ -62,6 +80,10 @@ func (m *Metadata) Reset() { *m = Metadata{} }
func (m *Metadata) String() string { return proto.CompactTextString(m) } func (m *Metadata) String() string { return proto.CompactTextString(m) }
func (*Metadata) ProtoMessage() {} func (*Metadata) ProtoMessage() {}
func init() {
proto.RegisterType((*Request)(nil), "etcdserverpb.Request")
proto.RegisterType((*Metadata)(nil), "etcdserverpb.Metadata")
}
func (m *Request) Marshal() (data []byte, err error) { func (m *Request) Marshal() (data []byte, err error) {
size := m.Size() size := m.Size()
data = make([]byte, size) data = make([]byte, size)
@@ -287,8 +309,12 @@ func (m *Request) Unmarshal(data []byte) error {
l := len(data) l := len(data)
iNdEx := 0 iNdEx := 0
for iNdEx < l { for iNdEx < l {
preIndex := iNdEx
var wire uint64 var wire uint64
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowEtcdserver
}
if iNdEx >= l { if iNdEx >= l {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
@@ -301,6 +327,12 @@ func (m *Request) Unmarshal(data []byte) error {
} }
fieldNum := int32(wire >> 3) fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7) wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Request: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Request: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum { switch fieldNum {
case 1: case 1:
if wireType != 0 { if wireType != 0 {
@@ -308,6 +340,9 @@ func (m *Request) Unmarshal(data []byte) error {
} }
m.ID = 0 m.ID = 0
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowEtcdserver
}
if iNdEx >= l { if iNdEx >= l {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
@@ -324,6 +359,9 @@ func (m *Request) Unmarshal(data []byte) error {
} }
var stringLen uint64 var stringLen uint64
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowEtcdserver
}
if iNdEx >= l { if iNdEx >= l {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
@@ -350,6 +388,9 @@ func (m *Request) Unmarshal(data []byte) error {
} }
var stringLen uint64 var stringLen uint64
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowEtcdserver
}
if iNdEx >= l { if iNdEx >= l {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
@@ -376,6 +417,9 @@ func (m *Request) Unmarshal(data []byte) error {
} }
var stringLen uint64 var stringLen uint64
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowEtcdserver
}
if iNdEx >= l { if iNdEx >= l {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
@@ -402,6 +446,9 @@ func (m *Request) Unmarshal(data []byte) error {
} }
var v int var v int
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowEtcdserver
}
if iNdEx >= l { if iNdEx >= l {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
@@ -419,6 +466,9 @@ func (m *Request) Unmarshal(data []byte) error {
} }
var stringLen uint64 var stringLen uint64
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowEtcdserver
}
if iNdEx >= l { if iNdEx >= l {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
@@ -445,6 +495,9 @@ func (m *Request) Unmarshal(data []byte) error {
} }
m.PrevIndex = 0 m.PrevIndex = 0
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowEtcdserver
}
if iNdEx >= l { if iNdEx >= l {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
@@ -461,6 +514,9 @@ func (m *Request) Unmarshal(data []byte) error {
} }
var v int var v int
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowEtcdserver
}
if iNdEx >= l { if iNdEx >= l {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
@@ -479,6 +535,9 @@ func (m *Request) Unmarshal(data []byte) error {
} }
m.Expiration = 0 m.Expiration = 0
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowEtcdserver
}
if iNdEx >= l { if iNdEx >= l {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
@@ -495,6 +554,9 @@ func (m *Request) Unmarshal(data []byte) error {
} }
var v int var v int
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowEtcdserver
}
if iNdEx >= l { if iNdEx >= l {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
@@ -512,6 +574,9 @@ func (m *Request) Unmarshal(data []byte) error {
} }
m.Since = 0 m.Since = 0
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowEtcdserver
}
if iNdEx >= l { if iNdEx >= l {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
@@ -528,6 +593,9 @@ func (m *Request) Unmarshal(data []byte) error {
} }
var v int var v int
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowEtcdserver
}
if iNdEx >= l { if iNdEx >= l {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
@@ -545,6 +613,9 @@ func (m *Request) Unmarshal(data []byte) error {
} }
var v int var v int
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowEtcdserver
}
if iNdEx >= l { if iNdEx >= l {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
@@ -562,6 +633,9 @@ func (m *Request) Unmarshal(data []byte) error {
} }
var v int var v int
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowEtcdserver
}
if iNdEx >= l { if iNdEx >= l {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
@@ -579,6 +653,9 @@ func (m *Request) Unmarshal(data []byte) error {
} }
m.Time = 0 m.Time = 0
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowEtcdserver
}
if iNdEx >= l { if iNdEx >= l {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
@@ -595,6 +672,9 @@ func (m *Request) Unmarshal(data []byte) error {
} }
var v int var v int
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowEtcdserver
}
if iNdEx >= l { if iNdEx >= l {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
@@ -607,15 +687,7 @@ func (m *Request) Unmarshal(data []byte) error {
} }
m.Stream = bool(v != 0) m.Stream = bool(v != 0)
default: default:
var sizeOfWire int iNdEx = preIndex
for {
sizeOfWire++
wire >>= 7
if wire == 0 {
break
}
}
iNdEx -= sizeOfWire
skippy, err := skipEtcdserver(data[iNdEx:]) skippy, err := skipEtcdserver(data[iNdEx:])
if err != nil { if err != nil {
return err return err
@@ -631,14 +703,21 @@ func (m *Request) Unmarshal(data []byte) error {
} }
} }
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil return nil
} }
func (m *Metadata) Unmarshal(data []byte) error { func (m *Metadata) Unmarshal(data []byte) error {
l := len(data) l := len(data)
iNdEx := 0 iNdEx := 0
for iNdEx < l { for iNdEx < l {
preIndex := iNdEx
var wire uint64 var wire uint64
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowEtcdserver
}
if iNdEx >= l { if iNdEx >= l {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
@@ -651,6 +730,12 @@ func (m *Metadata) Unmarshal(data []byte) error {
} }
fieldNum := int32(wire >> 3) fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7) wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Metadata: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Metadata: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum { switch fieldNum {
case 1: case 1:
if wireType != 0 { if wireType != 0 {
@@ -658,6 +743,9 @@ func (m *Metadata) Unmarshal(data []byte) error {
} }
m.NodeID = 0 m.NodeID = 0
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowEtcdserver
}
if iNdEx >= l { if iNdEx >= l {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
@@ -674,6 +762,9 @@ func (m *Metadata) Unmarshal(data []byte) error {
} }
m.ClusterID = 0 m.ClusterID = 0
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowEtcdserver
}
if iNdEx >= l { if iNdEx >= l {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
@@ -685,15 +776,7 @@ func (m *Metadata) Unmarshal(data []byte) error {
} }
} }
default: default:
var sizeOfWire int iNdEx = preIndex
for {
sizeOfWire++
wire >>= 7
if wire == 0 {
break
}
}
iNdEx -= sizeOfWire
skippy, err := skipEtcdserver(data[iNdEx:]) skippy, err := skipEtcdserver(data[iNdEx:])
if err != nil { if err != nil {
return err return err
@@ -709,6 +792,9 @@ func (m *Metadata) Unmarshal(data []byte) error {
} }
} }
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil return nil
} }
func skipEtcdserver(data []byte) (n int, err error) { func skipEtcdserver(data []byte) (n int, err error) {
@@ -717,6 +803,9 @@ func skipEtcdserver(data []byte) (n int, err error) {
for iNdEx < l { for iNdEx < l {
var wire uint64 var wire uint64
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowEtcdserver
}
if iNdEx >= l { if iNdEx >= l {
return 0, io.ErrUnexpectedEOF return 0, io.ErrUnexpectedEOF
} }
@@ -730,7 +819,10 @@ func skipEtcdserver(data []byte) (n int, err error) {
wireType := int(wire & 0x7) wireType := int(wire & 0x7)
switch wireType { switch wireType {
case 0: case 0:
for { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowEtcdserver
}
if iNdEx >= l { if iNdEx >= l {
return 0, io.ErrUnexpectedEOF return 0, io.ErrUnexpectedEOF
} }
@@ -746,6 +838,9 @@ func skipEtcdserver(data []byte) (n int, err error) {
case 2: case 2:
var length int var length int
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowEtcdserver
}
if iNdEx >= l { if iNdEx >= l {
return 0, io.ErrUnexpectedEOF return 0, io.ErrUnexpectedEOF
} }
@@ -766,6 +861,9 @@ func skipEtcdserver(data []byte) (n int, err error) {
var innerWire uint64 var innerWire uint64
var start int = iNdEx var start int = iNdEx
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowEtcdserver
}
if iNdEx >= l { if iNdEx >= l {
return 0, io.ErrUnexpectedEOF return 0, io.ErrUnexpectedEOF
} }
@@ -801,4 +899,5 @@ func skipEtcdserver(data []byte) (n int, err error) {
var ( var (
ErrInvalidLengthEtcdserver = fmt.Errorf("proto: negative length found during unmarshaling") ErrInvalidLengthEtcdserver = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowEtcdserver = fmt.Errorf("proto: integer overflow")
) )

View File

@@ -4,15 +4,20 @@
package etcdserverpb package etcdserverpb
import proto "github.com/gogo/protobuf/proto" import (
"fmt"
// discarding unused import gogoproto "github.com/coreos/etcd/Godeps/_workspace/src/gogoproto" proto "github.com/gogo/protobuf/proto"
)
import math "math"
import io "io" import io "io"
import fmt "fmt"
// Reference imports to suppress errors if they are not otherwise used. // Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// An InternalRaftRequest is the union of all requests which can be // An InternalRaftRequest is the union of all requests which can be
// sent via raft. // sent via raft.
@@ -28,6 +33,9 @@ func (m *InternalRaftRequest) Reset() { *m = InternalRaftRequest{} }
func (m *InternalRaftRequest) String() string { return proto.CompactTextString(m) } func (m *InternalRaftRequest) String() string { return proto.CompactTextString(m) }
func (*InternalRaftRequest) ProtoMessage() {} func (*InternalRaftRequest) ProtoMessage() {}
func init() {
proto.RegisterType((*InternalRaftRequest)(nil), "etcdserverpb.InternalRaftRequest")
}
func (m *InternalRaftRequest) Marshal() (data []byte, err error) { func (m *InternalRaftRequest) Marshal() (data []byte, err error) {
size := m.Size() size := m.Size()
data = make([]byte, size) data = make([]byte, size)
@@ -162,48 +170,16 @@ func sovRaftInternal(x uint64) (n int) {
func sozRaftInternal(x uint64) (n int) { func sozRaftInternal(x uint64) (n int) {
return sovRaftInternal(uint64((x << 1) ^ uint64((int64(x) >> 63)))) return sovRaftInternal(uint64((x << 1) ^ uint64((int64(x) >> 63))))
} }
func (this *InternalRaftRequest) GetValue() interface{} {
if this.V2 != nil {
return this.V2
}
if this.Range != nil {
return this.Range
}
if this.Put != nil {
return this.Put
}
if this.DeleteRange != nil {
return this.DeleteRange
}
if this.Txn != nil {
return this.Txn
}
return nil
}
func (this *InternalRaftRequest) SetValue(value interface{}) bool {
switch vt := value.(type) {
case *Request:
this.V2 = vt
case *RangeRequest:
this.Range = vt
case *PutRequest:
this.Put = vt
case *DeleteRangeRequest:
this.DeleteRange = vt
case *TxnRequest:
this.Txn = vt
default:
return false
}
return true
}
func (m *InternalRaftRequest) Unmarshal(data []byte) error { func (m *InternalRaftRequest) Unmarshal(data []byte) error {
l := len(data) l := len(data)
iNdEx := 0 iNdEx := 0
for iNdEx < l { for iNdEx < l {
preIndex := iNdEx
var wire uint64 var wire uint64
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowRaftInternal
}
if iNdEx >= l { if iNdEx >= l {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
@@ -216,6 +192,12 @@ func (m *InternalRaftRequest) Unmarshal(data []byte) error {
} }
fieldNum := int32(wire >> 3) fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7) wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: InternalRaftRequest: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: InternalRaftRequest: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum { switch fieldNum {
case 1: case 1:
if wireType != 2 { if wireType != 2 {
@@ -223,6 +205,9 @@ func (m *InternalRaftRequest) Unmarshal(data []byte) error {
} }
var msglen int var msglen int
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowRaftInternal
}
if iNdEx >= l { if iNdEx >= l {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
@@ -253,6 +238,9 @@ func (m *InternalRaftRequest) Unmarshal(data []byte) error {
} }
var msglen int var msglen int
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowRaftInternal
}
if iNdEx >= l { if iNdEx >= l {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
@@ -283,6 +271,9 @@ func (m *InternalRaftRequest) Unmarshal(data []byte) error {
} }
var msglen int var msglen int
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowRaftInternal
}
if iNdEx >= l { if iNdEx >= l {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
@@ -313,6 +304,9 @@ func (m *InternalRaftRequest) Unmarshal(data []byte) error {
} }
var msglen int var msglen int
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowRaftInternal
}
if iNdEx >= l { if iNdEx >= l {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
@@ -343,6 +337,9 @@ func (m *InternalRaftRequest) Unmarshal(data []byte) error {
} }
var msglen int var msglen int
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowRaftInternal
}
if iNdEx >= l { if iNdEx >= l {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
@@ -368,15 +365,7 @@ func (m *InternalRaftRequest) Unmarshal(data []byte) error {
} }
iNdEx = postIndex iNdEx = postIndex
default: default:
var sizeOfWire int iNdEx = preIndex
for {
sizeOfWire++
wire >>= 7
if wire == 0 {
break
}
}
iNdEx -= sizeOfWire
skippy, err := skipRaftInternal(data[iNdEx:]) skippy, err := skipRaftInternal(data[iNdEx:])
if err != nil { if err != nil {
return err return err
@@ -391,6 +380,9 @@ func (m *InternalRaftRequest) Unmarshal(data []byte) error {
} }
} }
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil return nil
} }
func skipRaftInternal(data []byte) (n int, err error) { func skipRaftInternal(data []byte) (n int, err error) {
@@ -399,6 +391,9 @@ func skipRaftInternal(data []byte) (n int, err error) {
for iNdEx < l { for iNdEx < l {
var wire uint64 var wire uint64
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowRaftInternal
}
if iNdEx >= l { if iNdEx >= l {
return 0, io.ErrUnexpectedEOF return 0, io.ErrUnexpectedEOF
} }
@@ -412,7 +407,10 @@ func skipRaftInternal(data []byte) (n int, err error) {
wireType := int(wire & 0x7) wireType := int(wire & 0x7)
switch wireType { switch wireType {
case 0: case 0:
for { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowRaftInternal
}
if iNdEx >= l { if iNdEx >= l {
return 0, io.ErrUnexpectedEOF return 0, io.ErrUnexpectedEOF
} }
@@ -428,6 +426,9 @@ func skipRaftInternal(data []byte) (n int, err error) {
case 2: case 2:
var length int var length int
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowRaftInternal
}
if iNdEx >= l { if iNdEx >= l {
return 0, io.ErrUnexpectedEOF return 0, io.ErrUnexpectedEOF
} }
@@ -448,6 +449,9 @@ func skipRaftInternal(data []byte) (n int, err error) {
var innerWire uint64 var innerWire uint64
var start int = iNdEx var start int = iNdEx
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowRaftInternal
}
if iNdEx >= l { if iNdEx >= l {
return 0, io.ErrUnexpectedEOF return 0, io.ErrUnexpectedEOF
} }
@@ -483,4 +487,5 @@ func skipRaftInternal(data []byte) (n int, err error) {
var ( var (
ErrInvalidLengthRaftInternal = fmt.Errorf("proto: negative length found during unmarshaling") ErrInvalidLengthRaftInternal = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowRaftInternal = fmt.Errorf("proto: integer overflow")
) )

View File

@@ -13,12 +13,9 @@ option (gogoproto.goproto_getters_all) = false;
// An InternalRaftRequest is the union of all requests which can be // An InternalRaftRequest is the union of all requests which can be
// sent via raft. // sent via raft.
message InternalRaftRequest { message InternalRaftRequest {
option (gogoproto.onlyone) = true;
oneof value {
Request v2 = 1; Request v2 = 1;
RangeRequest range = 2; RangeRequest range = 2;
PutRequest put = 3; PutRequest put = 3;
DeleteRangeRequest delete_range = 4; DeleteRangeRequest delete_range = 4;
TxnRequest txn = 5; TxnRequest txn = 5;
}
} }

File diff suppressed because it is too large Load Diff

View File

@@ -174,12 +174,17 @@ type EtcdServer struct {
// configuration is considered static for the lifetime of the EtcdServer. // configuration is considered static for the lifetime of the EtcdServer.
func NewServer(cfg *ServerConfig) (*EtcdServer, error) { func NewServer(cfg *ServerConfig) (*EtcdServer, error) {
st := store.New(StoreClusterPrefix, StoreKeysPrefix) st := store.New(StoreClusterPrefix, StoreKeysPrefix)
var w *wal.WAL var w *wal.WAL
var n raft.Node var n raft.Node
var s *raft.MemoryStorage var s *raft.MemoryStorage
var id types.ID var id types.ID
var cl *cluster var cl *cluster
if terr := fileutil.TouchDirAll(cfg.DataDir); terr != nil {
return nil, fmt.Errorf("cannot access data directory: %v", terr)
}
// Run the migrations. // Run the migrations.
dataVer, err := version.DetectDataDir(cfg.DataDir) dataVer, err := version.DetectDataDir(cfg.DataDir)
if err != nil { if err != nil {
@@ -189,11 +194,6 @@ func NewServer(cfg *ServerConfig) (*EtcdServer, error) {
return nil, err return nil, err
} }
err = os.MkdirAll(cfg.MemberDir(), privateDirMode)
if err != nil && err != os.ErrExist {
return nil, err
}
haveWAL := wal.Exist(cfg.WALDir()) haveWAL := wal.Exist(cfg.WALDir())
ss := snap.New(cfg.SnapDir()) ss := snap.New(cfg.SnapDir())
@@ -255,10 +255,6 @@ func NewServer(cfg *ServerConfig) (*EtcdServer, error) {
cfg.PrintWithInitial() cfg.PrintWithInitial()
id, n, s, w = startNode(cfg, cl, cl.MemberIDs()) id, n, s, w = startNode(cfg, cl, cl.MemberIDs())
case haveWAL: case haveWAL:
if err := fileutil.IsDirWriteable(cfg.DataDir); err != nil {
return nil, fmt.Errorf("cannot write to data directory: %v", err)
}
if err := fileutil.IsDirWriteable(cfg.MemberDir()); err != nil { if err := fileutil.IsDirWriteable(cfg.MemberDir()); err != nil {
return nil, fmt.Errorf("cannot write to member directory: %v", err) return nil, fmt.Errorf("cannot write to member directory: %v", err)
} }
@@ -295,6 +291,10 @@ func NewServer(cfg *ServerConfig) (*EtcdServer, error) {
return nil, fmt.Errorf("unsupported bootstrap config") return nil, fmt.Errorf("unsupported bootstrap config")
} }
if terr := fileutil.TouchDirAll(cfg.MemberDir()); terr != nil {
return nil, fmt.Errorf("cannot access member directory: %v", terr)
}
sstats := &stats.ServerStats{ sstats := &stats.ServerStats{
Name: cfg.Name, Name: cfg.Name,
ID: id.String(), ID: id.String(),

View File

@@ -19,6 +19,7 @@ import (
pb "github.com/coreos/etcd/etcdserver/etcdserverpb" pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
dstorage "github.com/coreos/etcd/storage" dstorage "github.com/coreos/etcd/storage"
"github.com/coreos/etcd/storage/storagepb"
"github.com/gogo/protobuf/proto" "github.com/gogo/protobuf/proto"
"golang.org/x/net/context" "golang.org/x/net/context"
) )
@@ -106,17 +107,24 @@ func doTxn(kv dstorage.KV, rt *pb.TxnRequest) *pb.TxnResponse {
} }
func doUnion(kv dstorage.KV, union *pb.RequestUnion) *pb.ResponseUnion { func doUnion(kv dstorage.KV, union *pb.RequestUnion) *pb.ResponseUnion {
switch { switch tv := union.Request.(type) {
case union.RequestRange != nil: case *pb.RequestUnion_RequestRange:
return &pb.ResponseUnion{ResponseRange: doRange(kv, union.RequestRange)} if tv.RequestRange != nil {
case union.RequestPut != nil: return &pb.ResponseUnion{Response: &pb.ResponseUnion_ResponseRange{ResponseRange: doRange(kv, tv.RequestRange)}}
return &pb.ResponseUnion{ResponsePut: doPut(kv, union.RequestPut)} }
case union.RequestDeleteRange != nil: case *pb.RequestUnion_RequestPut:
return &pb.ResponseUnion{ResponseDeleteRange: doDeleteRange(kv, union.RequestDeleteRange)} if tv.RequestPut != nil {
return &pb.ResponseUnion{Response: &pb.ResponseUnion_ResponsePut{ResponsePut: doPut(kv, tv.RequestPut)}}
}
case *pb.RequestUnion_RequestDeleteRange:
if tv.RequestDeleteRange != nil {
return &pb.ResponseUnion{Response: &pb.ResponseUnion_ResponseDeleteRange{ResponseDeleteRange: doDeleteRange(kv, tv.RequestDeleteRange)}}
}
default: default:
// empty union // empty union
return nil return nil
} }
return nil
} }
func doCompare(kv dstorage.KV, c *pb.Compare) (int64, bool) { func doCompare(kv dstorage.KV, c *pb.Compare) (int64, bool) {
@@ -124,20 +132,35 @@ func doCompare(kv dstorage.KV, c *pb.Compare) (int64, bool) {
if err != nil { if err != nil {
return rev, false return rev, false
} }
var ckv storagepb.KeyValue
ckv := ckvs[0] if len(ckvs) != 0 {
ckv = ckvs[0]
}
// -1 is less, 0 is equal, 1 is greater // -1 is less, 0 is equal, 1 is greater
var result int var result int
switch c.Target { switch c.Target {
case pb.Compare_VALUE: case pb.Compare_VALUE:
result = bytes.Compare(ckv.Value, c.Value) tv, _ := c.TargetUnion.(*pb.Compare_Value)
if tv != nil {
result = bytes.Compare(ckv.Value, tv.Value)
}
case pb.Compare_CREATE: case pb.Compare_CREATE:
result = compareInt64(ckv.CreateRevision, c.CreateRevision) tv, _ := c.TargetUnion.(*pb.Compare_CreateRevision)
if tv != nil {
result = compareInt64(ckv.CreateRevision, tv.CreateRevision)
}
case pb.Compare_MOD: case pb.Compare_MOD:
result = compareInt64(ckv.ModRevision, c.ModRevision) tv, _ := c.TargetUnion.(*pb.Compare_ModRevision)
if tv != nil {
result = compareInt64(ckv.ModRevision, tv.ModRevision)
}
case pb.Compare_VERSION: case pb.Compare_VERSION:
result = compareInt64(ckv.Version, c.Version) tv, _ := c.TargetUnion.(*pb.Compare_Version)
if tv != nil {
result = compareInt64(ckv.Version, tv.Version)
}
} }
switch c.Result { switch c.Result {

View File

@@ -25,6 +25,8 @@ import (
const ( const (
privateFileMode = 0600 privateFileMode = 0600
// owner can make/remove files inside the directory
privateDirMode = 0700
) )
var ( var (
@@ -55,3 +57,13 @@ func ReadDir(dirpath string) ([]string, error) {
sort.Strings(names) sort.Strings(names)
return names, nil return names, nil
} }
// TouchDirAll is simliar to os.MkdirAll. It creates directories with 0700 permission if any directory
// does not exists. TouchDirAll also ensures the given directory is writable.
func TouchDirAll(dir string) error {
err := os.MkdirAll(dir, privateDirMode)
if err != nil && err != os.ErrExist {
return err
}
return IsDirWriteable(dir)
}

View File

@@ -0,0 +1,41 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ioutil
import (
"io"
"os"
)
// WriteAndSyncFile behaviors just like ioutil.WriteFile in standard library
// but calls Sync before closing the file. WriteAndSyncFile guarantees the data
// is synced if there is no error returned.
func WriteAndSyncFile(filename string, data []byte, perm os.FileMode) error {
f, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)
if err != nil {
return err
}
n, err := f.Write(data)
if err == nil && n < len(data) {
err = io.ErrShortWrite
}
if err == nil {
err = f.Sync()
}
if err1 := f.Close(); err == nil {
err = err1
}
return err
}

View File

@@ -21,17 +21,19 @@ import (
"time" "time"
) )
// NewKeepAliveListener returns a listener that listens on the given address. type keepAliveConn interface {
// http://tldp.org/HOWTO/TCP-Keepalive-HOWTO/overview.html SetKeepAlive(bool) error
func NewKeepAliveListener(addr string, scheme string, info TLSInfo) (net.Listener, error) { SetKeepAlivePeriod(d time.Duration) error
l, err := net.Listen("tcp", addr) }
if err != nil {
return nil, err
}
// NewKeepAliveListener returns a listener that listens on the given address.
// Be careful when wrap around KeepAliveListener with another Listener if TLSInfo is not nil.
// Some pkgs (like go/http) might expect Listener to return TLSConn type to start TLS handshake.
// http://tldp.org/HOWTO/TCP-Keepalive-HOWTO/overview.html
func NewKeepAliveListener(l net.Listener, scheme string, info TLSInfo) (net.Listener, error) {
if scheme == "https" { if scheme == "https" {
if info.Empty() { if info.Empty() {
return nil, fmt.Errorf("cannot listen on TLS for %s: KeyFile and CertFile are not presented", scheme+"://"+addr) return nil, fmt.Errorf("cannot listen on TLS for given listener: KeyFile and CertFile are not presented")
} }
cfg, err := info.ServerConfig() cfg, err := info.ServerConfig()
if err != nil { if err != nil {
@@ -53,13 +55,13 @@ func (kln *keepaliveListener) Accept() (net.Conn, error) {
if err != nil { if err != nil {
return nil, err return nil, err
} }
tcpc := c.(*net.TCPConn) kac := c.(keepAliveConn)
// detection time: tcp_keepalive_time + tcp_keepalive_probes + tcp_keepalive_intvl // detection time: tcp_keepalive_time + tcp_keepalive_probes + tcp_keepalive_intvl
// default on linux: 30 + 8 * 30 // default on linux: 30 + 8 * 30
// default on osx: 30 + 8 * 75 // default on osx: 30 + 8 * 75
tcpc.SetKeepAlive(true) kac.SetKeepAlive(true)
tcpc.SetKeepAlivePeriod(30 * time.Second) kac.SetKeepAlivePeriod(30 * time.Second)
return tcpc, nil return c, nil
} }
// A tlsKeepaliveListener implements a network listener (net.Listener) for TLS connections. // A tlsKeepaliveListener implements a network listener (net.Listener) for TLS connections.
@@ -75,12 +77,12 @@ func (l *tlsKeepaliveListener) Accept() (c net.Conn, err error) {
if err != nil { if err != nil {
return return
} }
tcpc := c.(*net.TCPConn) kac := c.(keepAliveConn)
// detection time: tcp_keepalive_time + tcp_keepalive_probes + tcp_keepalive_intvl // detection time: tcp_keepalive_time + tcp_keepalive_probes + tcp_keepalive_intvl
// default on linux: 30 + 8 * 30 // default on linux: 30 + 8 * 30
// default on osx: 30 + 8 * 75 // default on osx: 30 + 8 * 75
tcpc.SetKeepAlive(true) kac.SetKeepAlive(true)
tcpc.SetKeepAlivePeriod(30 * time.Second) kac.SetKeepAlivePeriod(30 * time.Second)
c = tls.Server(c, l.config) c = tls.Server(c, l.config)
return return
} }

View File

@@ -0,0 +1,70 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package netutil provides network utility functions, complementing the more
// common ones in the net package.
package transport
import (
"errors"
"net"
"sync"
"time"
)
var (
ErrNotTCP = errors.New("only tcp connections have keepalive")
)
// LimitListener returns a Listener that accepts at most n simultaneous
// connections from the provided Listener.
func LimitListener(l net.Listener, n int) net.Listener {
return &limitListener{l, make(chan struct{}, n)}
}
type limitListener struct {
net.Listener
sem chan struct{}
}
func (l *limitListener) acquire() { l.sem <- struct{}{} }
func (l *limitListener) release() { <-l.sem }
func (l *limitListener) Accept() (net.Conn, error) {
l.acquire()
c, err := l.Listener.Accept()
if err != nil {
l.release()
return nil, err
}
return &limitListenerConn{Conn: c, release: l.release}, nil
}
type limitListenerConn struct {
net.Conn
releaseOnce sync.Once
release func()
}
func (l *limitListenerConn) Close() error {
err := l.Conn.Close()
l.releaseOnce.Do(l.release)
return err
}
func (l *limitListenerConn) SetKeepAlive(doKeepAlive bool) error {
tcpc, ok := l.Conn.(*net.TCPConn)
if !ok {
return ErrNotTCP
}
return tcpc.SetKeepAlive(doKeepAlive)
}
func (l *limitListenerConn) SetKeepAlivePeriod(d time.Duration) error {
tcpc, ok := l.Conn.(*net.TCPConn)
if !ok {
return ErrNotTCP
}
return tcpc.SetKeepAlivePeriod(d)
}

View File

@@ -19,16 +19,19 @@
*/ */
package raftpb package raftpb
import proto "github.com/gogo/protobuf/proto" import (
"fmt"
proto "github.com/gogo/protobuf/proto"
)
import math "math" import math "math"
// discarding unused import gogoproto "github.com/coreos/etcd/Godeps/_workspace/src/gogoproto"
import io "io" import io "io"
import fmt "fmt"
// Reference imports to suppress errors if they are not otherwise used. // Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf var _ = math.Inf
type EntryType int32 type EntryType int32
@@ -164,10 +167,10 @@ func (x *ConfChangeType) UnmarshalJSON(data []byte) error {
} }
type Entry struct { type Entry struct {
Type EntryType `protobuf:"varint,1,opt,enum=raftpb.EntryType" json:"Type"` Type EntryType `protobuf:"varint,1,opt,name=Type,enum=raftpb.EntryType" json:"Type"`
Term uint64 `protobuf:"varint,2,opt" json:"Term"` Term uint64 `protobuf:"varint,2,opt,name=Term" json:"Term"`
Index uint64 `protobuf:"varint,3,opt" json:"Index"` Index uint64 `protobuf:"varint,3,opt,name=Index" json:"Index"`
Data []byte `protobuf:"bytes,4,opt" json:"Data,omitempty"` Data []byte `protobuf:"bytes,4,opt,name=Data" json:"Data,omitempty"`
XXX_unrecognized []byte `json:"-"` XXX_unrecognized []byte `json:"-"`
} }
@@ -236,10 +239,10 @@ func (m *ConfState) String() string { return proto.CompactTextString(m) }
func (*ConfState) ProtoMessage() {} func (*ConfState) ProtoMessage() {}
type ConfChange struct { type ConfChange struct {
ID uint64 `protobuf:"varint,1,opt" json:"ID"` ID uint64 `protobuf:"varint,1,opt,name=ID" json:"ID"`
Type ConfChangeType `protobuf:"varint,2,opt,enum=raftpb.ConfChangeType" json:"Type"` Type ConfChangeType `protobuf:"varint,2,opt,name=Type,enum=raftpb.ConfChangeType" json:"Type"`
NodeID uint64 `protobuf:"varint,3,opt" json:"NodeID"` NodeID uint64 `protobuf:"varint,3,opt,name=NodeID" json:"NodeID"`
Context []byte `protobuf:"bytes,4,opt" json:"Context,omitempty"` Context []byte `protobuf:"bytes,4,opt,name=Context" json:"Context,omitempty"`
XXX_unrecognized []byte `json:"-"` XXX_unrecognized []byte `json:"-"`
} }
@@ -248,6 +251,13 @@ func (m *ConfChange) String() string { return proto.CompactTextString(m) }
func (*ConfChange) ProtoMessage() {} func (*ConfChange) ProtoMessage() {}
func init() { func init() {
proto.RegisterType((*Entry)(nil), "raftpb.Entry")
proto.RegisterType((*SnapshotMetadata)(nil), "raftpb.SnapshotMetadata")
proto.RegisterType((*Snapshot)(nil), "raftpb.Snapshot")
proto.RegisterType((*Message)(nil), "raftpb.Message")
proto.RegisterType((*HardState)(nil), "raftpb.HardState")
proto.RegisterType((*ConfState)(nil), "raftpb.ConfState")
proto.RegisterType((*ConfChange)(nil), "raftpb.ConfChange")
proto.RegisterEnum("raftpb.EntryType", EntryType_name, EntryType_value) proto.RegisterEnum("raftpb.EntryType", EntryType_name, EntryType_value)
proto.RegisterEnum("raftpb.MessageType", MessageType_name, MessageType_value) proto.RegisterEnum("raftpb.MessageType", MessageType_name, MessageType_value)
proto.RegisterEnum("raftpb.ConfChangeType", ConfChangeType_name, ConfChangeType_value) proto.RegisterEnum("raftpb.ConfChangeType", ConfChangeType_name, ConfChangeType_value)
@@ -681,8 +691,12 @@ func (m *Entry) Unmarshal(data []byte) error {
l := len(data) l := len(data)
iNdEx := 0 iNdEx := 0
for iNdEx < l { for iNdEx < l {
preIndex := iNdEx
var wire uint64 var wire uint64
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowRaft
}
if iNdEx >= l { if iNdEx >= l {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
@@ -695,6 +709,12 @@ func (m *Entry) Unmarshal(data []byte) error {
} }
fieldNum := int32(wire >> 3) fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7) wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Entry: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Entry: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum { switch fieldNum {
case 1: case 1:
if wireType != 0 { if wireType != 0 {
@@ -702,6 +722,9 @@ func (m *Entry) Unmarshal(data []byte) error {
} }
m.Type = 0 m.Type = 0
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowRaft
}
if iNdEx >= l { if iNdEx >= l {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
@@ -718,6 +741,9 @@ func (m *Entry) Unmarshal(data []byte) error {
} }
m.Term = 0 m.Term = 0
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowRaft
}
if iNdEx >= l { if iNdEx >= l {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
@@ -734,6 +760,9 @@ func (m *Entry) Unmarshal(data []byte) error {
} }
m.Index = 0 m.Index = 0
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowRaft
}
if iNdEx >= l { if iNdEx >= l {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
@@ -750,6 +779,9 @@ func (m *Entry) Unmarshal(data []byte) error {
} }
var byteLen int var byteLen int
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowRaft
}
if iNdEx >= l { if iNdEx >= l {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
@@ -767,18 +799,13 @@ func (m *Entry) Unmarshal(data []byte) error {
if postIndex > l { if postIndex > l {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
m.Data = append([]byte{}, data[iNdEx:postIndex]...) m.Data = append(m.Data[:0], data[iNdEx:postIndex]...)
if m.Data == nil {
m.Data = []byte{}
}
iNdEx = postIndex iNdEx = postIndex
default: default:
var sizeOfWire int iNdEx = preIndex
for {
sizeOfWire++
wire >>= 7
if wire == 0 {
break
}
}
iNdEx -= sizeOfWire
skippy, err := skipRaft(data[iNdEx:]) skippy, err := skipRaft(data[iNdEx:])
if err != nil { if err != nil {
return err return err
@@ -794,14 +821,21 @@ func (m *Entry) Unmarshal(data []byte) error {
} }
} }
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil return nil
} }
func (m *SnapshotMetadata) Unmarshal(data []byte) error { func (m *SnapshotMetadata) Unmarshal(data []byte) error {
l := len(data) l := len(data)
iNdEx := 0 iNdEx := 0
for iNdEx < l { for iNdEx < l {
preIndex := iNdEx
var wire uint64 var wire uint64
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowRaft
}
if iNdEx >= l { if iNdEx >= l {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
@@ -814,6 +848,12 @@ func (m *SnapshotMetadata) Unmarshal(data []byte) error {
} }
fieldNum := int32(wire >> 3) fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7) wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: SnapshotMetadata: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: SnapshotMetadata: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum { switch fieldNum {
case 1: case 1:
if wireType != 2 { if wireType != 2 {
@@ -821,6 +861,9 @@ func (m *SnapshotMetadata) Unmarshal(data []byte) error {
} }
var msglen int var msglen int
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowRaft
}
if iNdEx >= l { if iNdEx >= l {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
@@ -848,6 +891,9 @@ func (m *SnapshotMetadata) Unmarshal(data []byte) error {
} }
m.Index = 0 m.Index = 0
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowRaft
}
if iNdEx >= l { if iNdEx >= l {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
@@ -864,6 +910,9 @@ func (m *SnapshotMetadata) Unmarshal(data []byte) error {
} }
m.Term = 0 m.Term = 0
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowRaft
}
if iNdEx >= l { if iNdEx >= l {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
@@ -875,15 +924,7 @@ func (m *SnapshotMetadata) Unmarshal(data []byte) error {
} }
} }
default: default:
var sizeOfWire int iNdEx = preIndex
for {
sizeOfWire++
wire >>= 7
if wire == 0 {
break
}
}
iNdEx -= sizeOfWire
skippy, err := skipRaft(data[iNdEx:]) skippy, err := skipRaft(data[iNdEx:])
if err != nil { if err != nil {
return err return err
@@ -899,14 +940,21 @@ func (m *SnapshotMetadata) Unmarshal(data []byte) error {
} }
} }
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil return nil
} }
func (m *Snapshot) Unmarshal(data []byte) error { func (m *Snapshot) Unmarshal(data []byte) error {
l := len(data) l := len(data)
iNdEx := 0 iNdEx := 0
for iNdEx < l { for iNdEx < l {
preIndex := iNdEx
var wire uint64 var wire uint64
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowRaft
}
if iNdEx >= l { if iNdEx >= l {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
@@ -919,6 +967,12 @@ func (m *Snapshot) Unmarshal(data []byte) error {
} }
fieldNum := int32(wire >> 3) fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7) wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Snapshot: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Snapshot: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum { switch fieldNum {
case 1: case 1:
if wireType != 2 { if wireType != 2 {
@@ -926,6 +980,9 @@ func (m *Snapshot) Unmarshal(data []byte) error {
} }
var byteLen int var byteLen int
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowRaft
}
if iNdEx >= l { if iNdEx >= l {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
@@ -943,7 +1000,10 @@ func (m *Snapshot) Unmarshal(data []byte) error {
if postIndex > l { if postIndex > l {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
m.Data = append([]byte{}, data[iNdEx:postIndex]...) m.Data = append(m.Data[:0], data[iNdEx:postIndex]...)
if m.Data == nil {
m.Data = []byte{}
}
iNdEx = postIndex iNdEx = postIndex
case 2: case 2:
if wireType != 2 { if wireType != 2 {
@@ -951,6 +1011,9 @@ func (m *Snapshot) Unmarshal(data []byte) error {
} }
var msglen int var msglen int
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowRaft
}
if iNdEx >= l { if iNdEx >= l {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
@@ -973,15 +1036,7 @@ func (m *Snapshot) Unmarshal(data []byte) error {
} }
iNdEx = postIndex iNdEx = postIndex
default: default:
var sizeOfWire int iNdEx = preIndex
for {
sizeOfWire++
wire >>= 7
if wire == 0 {
break
}
}
iNdEx -= sizeOfWire
skippy, err := skipRaft(data[iNdEx:]) skippy, err := skipRaft(data[iNdEx:])
if err != nil { if err != nil {
return err return err
@@ -997,14 +1052,21 @@ func (m *Snapshot) Unmarshal(data []byte) error {
} }
} }
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil return nil
} }
func (m *Message) Unmarshal(data []byte) error { func (m *Message) Unmarshal(data []byte) error {
l := len(data) l := len(data)
iNdEx := 0 iNdEx := 0
for iNdEx < l { for iNdEx < l {
preIndex := iNdEx
var wire uint64 var wire uint64
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowRaft
}
if iNdEx >= l { if iNdEx >= l {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
@@ -1017,6 +1079,12 @@ func (m *Message) Unmarshal(data []byte) error {
} }
fieldNum := int32(wire >> 3) fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7) wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Message: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Message: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum { switch fieldNum {
case 1: case 1:
if wireType != 0 { if wireType != 0 {
@@ -1024,6 +1092,9 @@ func (m *Message) Unmarshal(data []byte) error {
} }
m.Type = 0 m.Type = 0
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowRaft
}
if iNdEx >= l { if iNdEx >= l {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
@@ -1040,6 +1111,9 @@ func (m *Message) Unmarshal(data []byte) error {
} }
m.To = 0 m.To = 0
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowRaft
}
if iNdEx >= l { if iNdEx >= l {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
@@ -1056,6 +1130,9 @@ func (m *Message) Unmarshal(data []byte) error {
} }
m.From = 0 m.From = 0
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowRaft
}
if iNdEx >= l { if iNdEx >= l {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
@@ -1072,6 +1149,9 @@ func (m *Message) Unmarshal(data []byte) error {
} }
m.Term = 0 m.Term = 0
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowRaft
}
if iNdEx >= l { if iNdEx >= l {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
@@ -1088,6 +1168,9 @@ func (m *Message) Unmarshal(data []byte) error {
} }
m.LogTerm = 0 m.LogTerm = 0
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowRaft
}
if iNdEx >= l { if iNdEx >= l {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
@@ -1104,6 +1187,9 @@ func (m *Message) Unmarshal(data []byte) error {
} }
m.Index = 0 m.Index = 0
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowRaft
}
if iNdEx >= l { if iNdEx >= l {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
@@ -1120,6 +1206,9 @@ func (m *Message) Unmarshal(data []byte) error {
} }
var msglen int var msglen int
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowRaft
}
if iNdEx >= l { if iNdEx >= l {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
@@ -1148,6 +1237,9 @@ func (m *Message) Unmarshal(data []byte) error {
} }
m.Commit = 0 m.Commit = 0
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowRaft
}
if iNdEx >= l { if iNdEx >= l {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
@@ -1164,6 +1256,9 @@ func (m *Message) Unmarshal(data []byte) error {
} }
var msglen int var msglen int
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowRaft
}
if iNdEx >= l { if iNdEx >= l {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
@@ -1191,6 +1286,9 @@ func (m *Message) Unmarshal(data []byte) error {
} }
var v int var v int
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowRaft
}
if iNdEx >= l { if iNdEx >= l {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
@@ -1208,6 +1306,9 @@ func (m *Message) Unmarshal(data []byte) error {
} }
m.RejectHint = 0 m.RejectHint = 0
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowRaft
}
if iNdEx >= l { if iNdEx >= l {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
@@ -1219,15 +1320,7 @@ func (m *Message) Unmarshal(data []byte) error {
} }
} }
default: default:
var sizeOfWire int iNdEx = preIndex
for {
sizeOfWire++
wire >>= 7
if wire == 0 {
break
}
}
iNdEx -= sizeOfWire
skippy, err := skipRaft(data[iNdEx:]) skippy, err := skipRaft(data[iNdEx:])
if err != nil { if err != nil {
return err return err
@@ -1243,14 +1336,21 @@ func (m *Message) Unmarshal(data []byte) error {
} }
} }
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil return nil
} }
func (m *HardState) Unmarshal(data []byte) error { func (m *HardState) Unmarshal(data []byte) error {
l := len(data) l := len(data)
iNdEx := 0 iNdEx := 0
for iNdEx < l { for iNdEx < l {
preIndex := iNdEx
var wire uint64 var wire uint64
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowRaft
}
if iNdEx >= l { if iNdEx >= l {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
@@ -1263,6 +1363,12 @@ func (m *HardState) Unmarshal(data []byte) error {
} }
fieldNum := int32(wire >> 3) fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7) wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: HardState: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: HardState: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum { switch fieldNum {
case 1: case 1:
if wireType != 0 { if wireType != 0 {
@@ -1270,6 +1376,9 @@ func (m *HardState) Unmarshal(data []byte) error {
} }
m.Term = 0 m.Term = 0
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowRaft
}
if iNdEx >= l { if iNdEx >= l {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
@@ -1286,6 +1395,9 @@ func (m *HardState) Unmarshal(data []byte) error {
} }
m.Vote = 0 m.Vote = 0
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowRaft
}
if iNdEx >= l { if iNdEx >= l {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
@@ -1302,6 +1414,9 @@ func (m *HardState) Unmarshal(data []byte) error {
} }
m.Commit = 0 m.Commit = 0
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowRaft
}
if iNdEx >= l { if iNdEx >= l {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
@@ -1313,15 +1428,7 @@ func (m *HardState) Unmarshal(data []byte) error {
} }
} }
default: default:
var sizeOfWire int iNdEx = preIndex
for {
sizeOfWire++
wire >>= 7
if wire == 0 {
break
}
}
iNdEx -= sizeOfWire
skippy, err := skipRaft(data[iNdEx:]) skippy, err := skipRaft(data[iNdEx:])
if err != nil { if err != nil {
return err return err
@@ -1337,14 +1444,21 @@ func (m *HardState) Unmarshal(data []byte) error {
} }
} }
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil return nil
} }
func (m *ConfState) Unmarshal(data []byte) error { func (m *ConfState) Unmarshal(data []byte) error {
l := len(data) l := len(data)
iNdEx := 0 iNdEx := 0
for iNdEx < l { for iNdEx < l {
preIndex := iNdEx
var wire uint64 var wire uint64
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowRaft
}
if iNdEx >= l { if iNdEx >= l {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
@@ -1357,6 +1471,12 @@ func (m *ConfState) Unmarshal(data []byte) error {
} }
fieldNum := int32(wire >> 3) fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7) wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: ConfState: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: ConfState: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum { switch fieldNum {
case 1: case 1:
if wireType != 0 { if wireType != 0 {
@@ -1364,6 +1484,9 @@ func (m *ConfState) Unmarshal(data []byte) error {
} }
var v uint64 var v uint64
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowRaft
}
if iNdEx >= l { if iNdEx >= l {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
@@ -1376,15 +1499,7 @@ func (m *ConfState) Unmarshal(data []byte) error {
} }
m.Nodes = append(m.Nodes, v) m.Nodes = append(m.Nodes, v)
default: default:
var sizeOfWire int iNdEx = preIndex
for {
sizeOfWire++
wire >>= 7
if wire == 0 {
break
}
}
iNdEx -= sizeOfWire
skippy, err := skipRaft(data[iNdEx:]) skippy, err := skipRaft(data[iNdEx:])
if err != nil { if err != nil {
return err return err
@@ -1400,14 +1515,21 @@ func (m *ConfState) Unmarshal(data []byte) error {
} }
} }
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil return nil
} }
func (m *ConfChange) Unmarshal(data []byte) error { func (m *ConfChange) Unmarshal(data []byte) error {
l := len(data) l := len(data)
iNdEx := 0 iNdEx := 0
for iNdEx < l { for iNdEx < l {
preIndex := iNdEx
var wire uint64 var wire uint64
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowRaft
}
if iNdEx >= l { if iNdEx >= l {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
@@ -1420,6 +1542,12 @@ func (m *ConfChange) Unmarshal(data []byte) error {
} }
fieldNum := int32(wire >> 3) fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7) wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: ConfChange: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: ConfChange: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum { switch fieldNum {
case 1: case 1:
if wireType != 0 { if wireType != 0 {
@@ -1427,6 +1555,9 @@ func (m *ConfChange) Unmarshal(data []byte) error {
} }
m.ID = 0 m.ID = 0
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowRaft
}
if iNdEx >= l { if iNdEx >= l {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
@@ -1443,6 +1574,9 @@ func (m *ConfChange) Unmarshal(data []byte) error {
} }
m.Type = 0 m.Type = 0
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowRaft
}
if iNdEx >= l { if iNdEx >= l {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
@@ -1459,6 +1593,9 @@ func (m *ConfChange) Unmarshal(data []byte) error {
} }
m.NodeID = 0 m.NodeID = 0
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowRaft
}
if iNdEx >= l { if iNdEx >= l {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
@@ -1475,6 +1612,9 @@ func (m *ConfChange) Unmarshal(data []byte) error {
} }
var byteLen int var byteLen int
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowRaft
}
if iNdEx >= l { if iNdEx >= l {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
@@ -1492,18 +1632,13 @@ func (m *ConfChange) Unmarshal(data []byte) error {
if postIndex > l { if postIndex > l {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
m.Context = append([]byte{}, data[iNdEx:postIndex]...) m.Context = append(m.Context[:0], data[iNdEx:postIndex]...)
if m.Context == nil {
m.Context = []byte{}
}
iNdEx = postIndex iNdEx = postIndex
default: default:
var sizeOfWire int iNdEx = preIndex
for {
sizeOfWire++
wire >>= 7
if wire == 0 {
break
}
}
iNdEx -= sizeOfWire
skippy, err := skipRaft(data[iNdEx:]) skippy, err := skipRaft(data[iNdEx:])
if err != nil { if err != nil {
return err return err
@@ -1519,6 +1654,9 @@ func (m *ConfChange) Unmarshal(data []byte) error {
} }
} }
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil return nil
} }
func skipRaft(data []byte) (n int, err error) { func skipRaft(data []byte) (n int, err error) {
@@ -1527,6 +1665,9 @@ func skipRaft(data []byte) (n int, err error) {
for iNdEx < l { for iNdEx < l {
var wire uint64 var wire uint64
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowRaft
}
if iNdEx >= l { if iNdEx >= l {
return 0, io.ErrUnexpectedEOF return 0, io.ErrUnexpectedEOF
} }
@@ -1540,7 +1681,10 @@ func skipRaft(data []byte) (n int, err error) {
wireType := int(wire & 0x7) wireType := int(wire & 0x7)
switch wireType { switch wireType {
case 0: case 0:
for { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowRaft
}
if iNdEx >= l { if iNdEx >= l {
return 0, io.ErrUnexpectedEOF return 0, io.ErrUnexpectedEOF
} }
@@ -1556,6 +1700,9 @@ func skipRaft(data []byte) (n int, err error) {
case 2: case 2:
var length int var length int
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowRaft
}
if iNdEx >= l { if iNdEx >= l {
return 0, io.ErrUnexpectedEOF return 0, io.ErrUnexpectedEOF
} }
@@ -1576,6 +1723,9 @@ func skipRaft(data []byte) (n int, err error) {
var innerWire uint64 var innerWire uint64
var start int = iNdEx var start int = iNdEx
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowRaft
}
if iNdEx >= l { if iNdEx >= l {
return 0, io.ErrUnexpectedEOF return 0, io.ErrUnexpectedEOF
} }
@@ -1611,4 +1761,5 @@ func skipRaft(data []byte) (n int, err error) {
var ( var (
ErrInvalidLengthRaft = fmt.Errorf("proto: negative length found during unmarshaling") ErrInvalidLengthRaft = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowRaft = fmt.Errorf("proto: integer overflow")
) )

View File

@@ -13,16 +13,19 @@
*/ */
package snappb package snappb
import proto "github.com/gogo/protobuf/proto" import (
"fmt"
proto "github.com/gogo/protobuf/proto"
)
import math "math" import math "math"
// discarding unused import gogoproto "github.com/coreos/etcd/Godeps/_workspace/src/gogoproto"
import io "io" import io "io"
import fmt "fmt"
// Reference imports to suppress errors if they are not otherwise used. // Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf var _ = math.Inf
type Snapshot struct { type Snapshot struct {
@@ -35,6 +38,9 @@ func (m *Snapshot) Reset() { *m = Snapshot{} }
func (m *Snapshot) String() string { return proto.CompactTextString(m) } func (m *Snapshot) String() string { return proto.CompactTextString(m) }
func (*Snapshot) ProtoMessage() {} func (*Snapshot) ProtoMessage() {}
func init() {
proto.RegisterType((*Snapshot)(nil), "snappb.snapshot")
}
func (m *Snapshot) Marshal() (data []byte, err error) { func (m *Snapshot) Marshal() (data []byte, err error) {
size := m.Size() size := m.Size()
data = make([]byte, size) data = make([]byte, size)
@@ -123,8 +129,12 @@ func (m *Snapshot) Unmarshal(data []byte) error {
l := len(data) l := len(data)
iNdEx := 0 iNdEx := 0
for iNdEx < l { for iNdEx < l {
preIndex := iNdEx
var wire uint64 var wire uint64
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowSnap
}
if iNdEx >= l { if iNdEx >= l {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
@@ -137,6 +147,12 @@ func (m *Snapshot) Unmarshal(data []byte) error {
} }
fieldNum := int32(wire >> 3) fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7) wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: snapshot: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: snapshot: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum { switch fieldNum {
case 1: case 1:
if wireType != 0 { if wireType != 0 {
@@ -144,6 +160,9 @@ func (m *Snapshot) Unmarshal(data []byte) error {
} }
m.Crc = 0 m.Crc = 0
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowSnap
}
if iNdEx >= l { if iNdEx >= l {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
@@ -160,6 +179,9 @@ func (m *Snapshot) Unmarshal(data []byte) error {
} }
var byteLen int var byteLen int
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowSnap
}
if iNdEx >= l { if iNdEx >= l {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
@@ -177,18 +199,13 @@ func (m *Snapshot) Unmarshal(data []byte) error {
if postIndex > l { if postIndex > l {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
m.Data = append([]byte{}, data[iNdEx:postIndex]...) m.Data = append(m.Data[:0], data[iNdEx:postIndex]...)
if m.Data == nil {
m.Data = []byte{}
}
iNdEx = postIndex iNdEx = postIndex
default: default:
var sizeOfWire int iNdEx = preIndex
for {
sizeOfWire++
wire >>= 7
if wire == 0 {
break
}
}
iNdEx -= sizeOfWire
skippy, err := skipSnap(data[iNdEx:]) skippy, err := skipSnap(data[iNdEx:])
if err != nil { if err != nil {
return err return err
@@ -204,6 +221,9 @@ func (m *Snapshot) Unmarshal(data []byte) error {
} }
} }
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil return nil
} }
func skipSnap(data []byte) (n int, err error) { func skipSnap(data []byte) (n int, err error) {
@@ -212,6 +232,9 @@ func skipSnap(data []byte) (n int, err error) {
for iNdEx < l { for iNdEx < l {
var wire uint64 var wire uint64
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowSnap
}
if iNdEx >= l { if iNdEx >= l {
return 0, io.ErrUnexpectedEOF return 0, io.ErrUnexpectedEOF
} }
@@ -225,7 +248,10 @@ func skipSnap(data []byte) (n int, err error) {
wireType := int(wire & 0x7) wireType := int(wire & 0x7)
switch wireType { switch wireType {
case 0: case 0:
for { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowSnap
}
if iNdEx >= l { if iNdEx >= l {
return 0, io.ErrUnexpectedEOF return 0, io.ErrUnexpectedEOF
} }
@@ -241,6 +267,9 @@ func skipSnap(data []byte) (n int, err error) {
case 2: case 2:
var length int var length int
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowSnap
}
if iNdEx >= l { if iNdEx >= l {
return 0, io.ErrUnexpectedEOF return 0, io.ErrUnexpectedEOF
} }
@@ -261,6 +290,9 @@ func skipSnap(data []byte) (n int, err error) {
var innerWire uint64 var innerWire uint64
var start int = iNdEx var start int = iNdEx
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowSnap
}
if iNdEx >= l { if iNdEx >= l {
return 0, io.ErrUnexpectedEOF return 0, io.ErrUnexpectedEOF
} }
@@ -296,4 +328,5 @@ func skipSnap(data []byte) (n int, err error) {
var ( var (
ErrInvalidLengthSnap = fmt.Errorf("proto: negative length found during unmarshaling") ErrInvalidLengthSnap = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowSnap = fmt.Errorf("proto: integer overflow")
) )

View File

@@ -14,15 +14,20 @@
*/ */
package storagepb package storagepb
import proto "github.com/gogo/protobuf/proto" import (
"fmt"
// discarding unused import gogoproto "github.com/coreos/etcd/Godeps/_workspace/src/gogoproto" proto "github.com/gogo/protobuf/proto"
)
import math "math"
import io "io" import io "io"
import fmt "fmt"
// Reference imports to suppress errors if they are not otherwise used. // Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
type Event_EventType int32 type Event_EventType int32
@@ -76,6 +81,8 @@ func (m *Event) String() string { return proto.CompactTextString(m) }
func (*Event) ProtoMessage() {} func (*Event) ProtoMessage() {}
func init() { func init() {
proto.RegisterType((*KeyValue)(nil), "storagepb.KeyValue")
proto.RegisterType((*Event)(nil), "storagepb.Event")
proto.RegisterEnum("storagepb.Event_EventType", Event_EventType_name, Event_EventType_value) proto.RegisterEnum("storagepb.Event_EventType", Event_EventType_name, Event_EventType_value)
} }
func (m *KeyValue) Marshal() (data []byte, err error) { func (m *KeyValue) Marshal() (data []byte, err error) {
@@ -244,8 +251,12 @@ func (m *KeyValue) Unmarshal(data []byte) error {
l := len(data) l := len(data)
iNdEx := 0 iNdEx := 0
for iNdEx < l { for iNdEx < l {
preIndex := iNdEx
var wire uint64 var wire uint64
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowKv
}
if iNdEx >= l { if iNdEx >= l {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
@@ -258,6 +269,12 @@ func (m *KeyValue) Unmarshal(data []byte) error {
} }
fieldNum := int32(wire >> 3) fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7) wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: KeyValue: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: KeyValue: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum { switch fieldNum {
case 1: case 1:
if wireType != 2 { if wireType != 2 {
@@ -265,6 +282,9 @@ func (m *KeyValue) Unmarshal(data []byte) error {
} }
var byteLen int var byteLen int
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowKv
}
if iNdEx >= l { if iNdEx >= l {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
@@ -282,7 +302,10 @@ func (m *KeyValue) Unmarshal(data []byte) error {
if postIndex > l { if postIndex > l {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
m.Key = append([]byte{}, data[iNdEx:postIndex]...) m.Key = append(m.Key[:0], data[iNdEx:postIndex]...)
if m.Key == nil {
m.Key = []byte{}
}
iNdEx = postIndex iNdEx = postIndex
case 2: case 2:
if wireType != 0 { if wireType != 0 {
@@ -290,6 +313,9 @@ func (m *KeyValue) Unmarshal(data []byte) error {
} }
m.CreateRevision = 0 m.CreateRevision = 0
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowKv
}
if iNdEx >= l { if iNdEx >= l {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
@@ -306,6 +332,9 @@ func (m *KeyValue) Unmarshal(data []byte) error {
} }
m.ModRevision = 0 m.ModRevision = 0
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowKv
}
if iNdEx >= l { if iNdEx >= l {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
@@ -322,6 +351,9 @@ func (m *KeyValue) Unmarshal(data []byte) error {
} }
m.Version = 0 m.Version = 0
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowKv
}
if iNdEx >= l { if iNdEx >= l {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
@@ -338,6 +370,9 @@ func (m *KeyValue) Unmarshal(data []byte) error {
} }
var byteLen int var byteLen int
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowKv
}
if iNdEx >= l { if iNdEx >= l {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
@@ -355,18 +390,13 @@ func (m *KeyValue) Unmarshal(data []byte) error {
if postIndex > l { if postIndex > l {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
m.Value = append([]byte{}, data[iNdEx:postIndex]...) m.Value = append(m.Value[:0], data[iNdEx:postIndex]...)
if m.Value == nil {
m.Value = []byte{}
}
iNdEx = postIndex iNdEx = postIndex
default: default:
var sizeOfWire int iNdEx = preIndex
for {
sizeOfWire++
wire >>= 7
if wire == 0 {
break
}
}
iNdEx -= sizeOfWire
skippy, err := skipKv(data[iNdEx:]) skippy, err := skipKv(data[iNdEx:])
if err != nil { if err != nil {
return err return err
@@ -381,14 +411,21 @@ func (m *KeyValue) Unmarshal(data []byte) error {
} }
} }
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil return nil
} }
func (m *Event) Unmarshal(data []byte) error { func (m *Event) Unmarshal(data []byte) error {
l := len(data) l := len(data)
iNdEx := 0 iNdEx := 0
for iNdEx < l { for iNdEx < l {
preIndex := iNdEx
var wire uint64 var wire uint64
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowKv
}
if iNdEx >= l { if iNdEx >= l {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
@@ -401,6 +438,12 @@ func (m *Event) Unmarshal(data []byte) error {
} }
fieldNum := int32(wire >> 3) fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7) wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Event: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Event: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum { switch fieldNum {
case 1: case 1:
if wireType != 0 { if wireType != 0 {
@@ -408,6 +451,9 @@ func (m *Event) Unmarshal(data []byte) error {
} }
m.Type = 0 m.Type = 0
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowKv
}
if iNdEx >= l { if iNdEx >= l {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
@@ -424,6 +470,9 @@ func (m *Event) Unmarshal(data []byte) error {
} }
var msglen int var msglen int
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowKv
}
if iNdEx >= l { if iNdEx >= l {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
@@ -449,15 +498,7 @@ func (m *Event) Unmarshal(data []byte) error {
} }
iNdEx = postIndex iNdEx = postIndex
default: default:
var sizeOfWire int iNdEx = preIndex
for {
sizeOfWire++
wire >>= 7
if wire == 0 {
break
}
}
iNdEx -= sizeOfWire
skippy, err := skipKv(data[iNdEx:]) skippy, err := skipKv(data[iNdEx:])
if err != nil { if err != nil {
return err return err
@@ -472,6 +513,9 @@ func (m *Event) Unmarshal(data []byte) error {
} }
} }
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil return nil
} }
func skipKv(data []byte) (n int, err error) { func skipKv(data []byte) (n int, err error) {
@@ -480,6 +524,9 @@ func skipKv(data []byte) (n int, err error) {
for iNdEx < l { for iNdEx < l {
var wire uint64 var wire uint64
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowKv
}
if iNdEx >= l { if iNdEx >= l {
return 0, io.ErrUnexpectedEOF return 0, io.ErrUnexpectedEOF
} }
@@ -493,7 +540,10 @@ func skipKv(data []byte) (n int, err error) {
wireType := int(wire & 0x7) wireType := int(wire & 0x7)
switch wireType { switch wireType {
case 0: case 0:
for { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowKv
}
if iNdEx >= l { if iNdEx >= l {
return 0, io.ErrUnexpectedEOF return 0, io.ErrUnexpectedEOF
} }
@@ -509,6 +559,9 @@ func skipKv(data []byte) (n int, err error) {
case 2: case 2:
var length int var length int
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowKv
}
if iNdEx >= l { if iNdEx >= l {
return 0, io.ErrUnexpectedEOF return 0, io.ErrUnexpectedEOF
} }
@@ -529,6 +582,9 @@ func skipKv(data []byte) (n int, err error) {
var innerWire uint64 var innerWire uint64
var start int = iNdEx var start int = iNdEx
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowKv
}
if iNdEx >= l { if iNdEx >= l {
return 0, io.ErrUnexpectedEOF return 0, io.ErrUnexpectedEOF
} }
@@ -564,4 +620,5 @@ func skipKv(data []byte) (n int, err error) {
var ( var (
ErrInvalidLengthKv = fmt.Errorf("proto: negative length found during unmarshaling") ErrInvalidLengthKv = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowKv = fmt.Errorf("proto: integer overflow")
) )

View File

@@ -27,7 +27,7 @@ import (
var ( var (
// MinClusterVersion is the min cluster version this etcd binary is compatible with. // MinClusterVersion is the min cluster version this etcd binary is compatible with.
MinClusterVersion = "2.1.0" MinClusterVersion = "2.1.0"
Version = "2.2.2+git" Version = "2.2.5"
// Git SHA Value will be set during build // Git SHA Value will be set during build
GitSHA = "Not provided (use ./build instead of go build)" GitSHA = "Not provided (use ./build instead of go build)"

View File

@@ -14,16 +14,19 @@
*/ */
package walpb package walpb
import proto "github.com/gogo/protobuf/proto" import (
"fmt"
proto "github.com/gogo/protobuf/proto"
)
import math "math" import math "math"
// discarding unused import gogoproto "github.com/coreos/etcd/Godeps/_workspace/src/gogoproto"
import io "io" import io "io"
import fmt "fmt"
// Reference imports to suppress errors if they are not otherwise used. // Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf var _ = math.Inf
type Record struct { type Record struct {
@@ -47,6 +50,10 @@ func (m *Snapshot) Reset() { *m = Snapshot{} }
func (m *Snapshot) String() string { return proto.CompactTextString(m) } func (m *Snapshot) String() string { return proto.CompactTextString(m) }
func (*Snapshot) ProtoMessage() {} func (*Snapshot) ProtoMessage() {}
func init() {
proto.RegisterType((*Record)(nil), "walpb.Record")
proto.RegisterType((*Snapshot)(nil), "walpb.Snapshot")
}
func (m *Record) Marshal() (data []byte, err error) { func (m *Record) Marshal() (data []byte, err error) {
size := m.Size() size := m.Size()
data = make([]byte, size) data = make([]byte, size)
@@ -177,8 +184,12 @@ func (m *Record) Unmarshal(data []byte) error {
l := len(data) l := len(data)
iNdEx := 0 iNdEx := 0
for iNdEx < l { for iNdEx < l {
preIndex := iNdEx
var wire uint64 var wire uint64
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowRecord
}
if iNdEx >= l { if iNdEx >= l {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
@@ -191,6 +202,12 @@ func (m *Record) Unmarshal(data []byte) error {
} }
fieldNum := int32(wire >> 3) fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7) wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Record: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Record: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum { switch fieldNum {
case 1: case 1:
if wireType != 0 { if wireType != 0 {
@@ -198,6 +215,9 @@ func (m *Record) Unmarshal(data []byte) error {
} }
m.Type = 0 m.Type = 0
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowRecord
}
if iNdEx >= l { if iNdEx >= l {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
@@ -214,6 +234,9 @@ func (m *Record) Unmarshal(data []byte) error {
} }
m.Crc = 0 m.Crc = 0
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowRecord
}
if iNdEx >= l { if iNdEx >= l {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
@@ -230,6 +253,9 @@ func (m *Record) Unmarshal(data []byte) error {
} }
var byteLen int var byteLen int
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowRecord
}
if iNdEx >= l { if iNdEx >= l {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
@@ -247,18 +273,13 @@ func (m *Record) Unmarshal(data []byte) error {
if postIndex > l { if postIndex > l {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
m.Data = append([]byte{}, data[iNdEx:postIndex]...) m.Data = append(m.Data[:0], data[iNdEx:postIndex]...)
if m.Data == nil {
m.Data = []byte{}
}
iNdEx = postIndex iNdEx = postIndex
default: default:
var sizeOfWire int iNdEx = preIndex
for {
sizeOfWire++
wire >>= 7
if wire == 0 {
break
}
}
iNdEx -= sizeOfWire
skippy, err := skipRecord(data[iNdEx:]) skippy, err := skipRecord(data[iNdEx:])
if err != nil { if err != nil {
return err return err
@@ -274,14 +295,21 @@ func (m *Record) Unmarshal(data []byte) error {
} }
} }
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil return nil
} }
func (m *Snapshot) Unmarshal(data []byte) error { func (m *Snapshot) Unmarshal(data []byte) error {
l := len(data) l := len(data)
iNdEx := 0 iNdEx := 0
for iNdEx < l { for iNdEx < l {
preIndex := iNdEx
var wire uint64 var wire uint64
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowRecord
}
if iNdEx >= l { if iNdEx >= l {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
@@ -294,6 +322,12 @@ func (m *Snapshot) Unmarshal(data []byte) error {
} }
fieldNum := int32(wire >> 3) fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7) wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Snapshot: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Snapshot: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum { switch fieldNum {
case 1: case 1:
if wireType != 0 { if wireType != 0 {
@@ -301,6 +335,9 @@ func (m *Snapshot) Unmarshal(data []byte) error {
} }
m.Index = 0 m.Index = 0
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowRecord
}
if iNdEx >= l { if iNdEx >= l {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
@@ -317,6 +354,9 @@ func (m *Snapshot) Unmarshal(data []byte) error {
} }
m.Term = 0 m.Term = 0
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowRecord
}
if iNdEx >= l { if iNdEx >= l {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
@@ -328,15 +368,7 @@ func (m *Snapshot) Unmarshal(data []byte) error {
} }
} }
default: default:
var sizeOfWire int iNdEx = preIndex
for {
sizeOfWire++
wire >>= 7
if wire == 0 {
break
}
}
iNdEx -= sizeOfWire
skippy, err := skipRecord(data[iNdEx:]) skippy, err := skipRecord(data[iNdEx:])
if err != nil { if err != nil {
return err return err
@@ -352,6 +384,9 @@ func (m *Snapshot) Unmarshal(data []byte) error {
} }
} }
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil return nil
} }
func skipRecord(data []byte) (n int, err error) { func skipRecord(data []byte) (n int, err error) {
@@ -360,6 +395,9 @@ func skipRecord(data []byte) (n int, err error) {
for iNdEx < l { for iNdEx < l {
var wire uint64 var wire uint64
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowRecord
}
if iNdEx >= l { if iNdEx >= l {
return 0, io.ErrUnexpectedEOF return 0, io.ErrUnexpectedEOF
} }
@@ -373,7 +411,10 @@ func skipRecord(data []byte) (n int, err error) {
wireType := int(wire & 0x7) wireType := int(wire & 0x7)
switch wireType { switch wireType {
case 0: case 0:
for { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowRecord
}
if iNdEx >= l { if iNdEx >= l {
return 0, io.ErrUnexpectedEOF return 0, io.ErrUnexpectedEOF
} }
@@ -389,6 +430,9 @@ func skipRecord(data []byte) (n int, err error) {
case 2: case 2:
var length int var length int
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowRecord
}
if iNdEx >= l { if iNdEx >= l {
return 0, io.ErrUnexpectedEOF return 0, io.ErrUnexpectedEOF
} }
@@ -409,6 +453,9 @@ func skipRecord(data []byte) (n int, err error) {
var innerWire uint64 var innerWire uint64
var start int = iNdEx var start int = iNdEx
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowRecord
}
if iNdEx >= l { if iNdEx >= l {
return 0, io.ErrUnexpectedEOF return 0, io.ErrUnexpectedEOF
} }
@@ -444,4 +491,5 @@ func skipRecord(data []byte) (n int, err error) {
var ( var (
ErrInvalidLengthRecord = fmt.Errorf("proto: negative length found during unmarshaling") ErrInvalidLengthRecord = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowRecord = fmt.Errorf("proto: integer overflow")
) )

View File

@@ -200,3 +200,28 @@ Apache License
See the License for the specific language governing permissions and See the License for the specific language governing permissions and
limitations under the License. limitations under the License.
Third Party Sources Bundled
This project includes code derived from the MIT licensed naegelejd/go-acl
project. Here's a copy of its license:
Copyright (c) 2015 Joseph Naegele
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@@ -2,8 +2,6 @@
The API defined here is proposed, experimental, and (for now) subject to change at any time. The API defined here is proposed, experimental, and (for now) subject to change at any time.
**Do not use it.**
If you think you want to use it, or for any other queries, contact <rkt-dev@googlegroups.com> or file an [issue](https://github.com/coreos/rkt/issues/new) If you think you want to use it, or for any other queries, contact <rkt-dev@googlegroups.com> or file an [issue](https://github.com/coreos/rkt/issues/new)
For more information, see: For more information, see:

View File

@@ -51,6 +51,10 @@ var _ = proto.Marshal
var _ = fmt.Errorf var _ = fmt.Errorf
var _ = math.Inf var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
const _ = proto.ProtoPackageIsVersion1
// ImageType defines the supported image type. // ImageType defines the supported image type.
type ImageType int32 type ImageType int32
@@ -77,6 +81,7 @@ var ImageType_value = map[string]int32{
func (x ImageType) String() string { func (x ImageType) String() string {
return proto.EnumName(ImageType_name, int32(x)) return proto.EnumName(ImageType_name, int32(x))
} }
func (ImageType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
// AppState defines the possible states of the app. // AppState defines the possible states of the app.
type AppState int32 type AppState int32
@@ -101,6 +106,7 @@ var AppState_value = map[string]int32{
func (x AppState) String() string { func (x AppState) String() string {
return proto.EnumName(AppState_name, int32(x)) return proto.EnumName(AppState_name, int32(x))
} }
func (AppState) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
// PodState defines the possible states of the pod. // PodState defines the possible states of the pod.
// See https://github.com/coreos/rkt/blob/master/Documentation/devel/pod-lifecycle.md for a detailed // See https://github.com/coreos/rkt/blob/master/Documentation/devel/pod-lifecycle.md for a detailed
@@ -148,6 +154,7 @@ var PodState_value = map[string]int32{
func (x PodState) String() string { func (x PodState) String() string {
return proto.EnumName(PodState_name, int32(x)) return proto.EnumName(PodState_name, int32(x))
} }
func (PodState) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
// EventType defines the type of the events that will be received via ListenEvents(). // EventType defines the type of the events that will be received via ListenEvents().
type EventType int32 type EventType int32
@@ -196,6 +203,7 @@ var EventType_value = map[string]int32{
func (x EventType) String() string { func (x EventType) String() string {
return proto.EnumName(EventType_name, int32(x)) return proto.EnumName(EventType_name, int32(x))
} }
func (EventType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
// ImageFormat defines the format of the image. // ImageFormat defines the format of the image.
type ImageFormat struct { type ImageFormat struct {
@@ -205,9 +213,10 @@ type ImageFormat struct {
Version string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` Version string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"`
} }
func (m *ImageFormat) Reset() { *m = ImageFormat{} } func (m *ImageFormat) Reset() { *m = ImageFormat{} }
func (m *ImageFormat) String() string { return proto.CompactTextString(m) } func (m *ImageFormat) String() string { return proto.CompactTextString(m) }
func (*ImageFormat) ProtoMessage() {} func (*ImageFormat) ProtoMessage() {}
func (*ImageFormat) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
// Image describes the image's information. // Image describes the image's information.
type Image struct { type Image struct {
@@ -232,9 +241,10 @@ type Image struct {
Annotations []*KeyValue `protobuf:"bytes,8,rep,name=annotations" json:"annotations,omitempty"` Annotations []*KeyValue `protobuf:"bytes,8,rep,name=annotations" json:"annotations,omitempty"`
} }
func (m *Image) Reset() { *m = Image{} } func (m *Image) Reset() { *m = Image{} }
func (m *Image) String() string { return proto.CompactTextString(m) } func (m *Image) String() string { return proto.CompactTextString(m) }
func (*Image) ProtoMessage() {} func (*Image) ProtoMessage() {}
func (*Image) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
func (m *Image) GetBaseFormat() *ImageFormat { func (m *Image) GetBaseFormat() *ImageFormat {
if m != nil { if m != nil {
@@ -260,9 +270,10 @@ type Network struct {
Ipv6 string `protobuf:"bytes,3,opt,name=ipv6" json:"ipv6,omitempty"` Ipv6 string `protobuf:"bytes,3,opt,name=ipv6" json:"ipv6,omitempty"`
} }
func (m *Network) Reset() { *m = Network{} } func (m *Network) Reset() { *m = Network{} }
func (m *Network) String() string { return proto.CompactTextString(m) } func (m *Network) String() string { return proto.CompactTextString(m) }
func (*Network) ProtoMessage() {} func (*Network) ProtoMessage() {}
func (*Network) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
// App describes the information of an app that's running in a pod. // App describes the information of an app that's running in a pod.
type App struct { type App struct {
@@ -280,9 +291,10 @@ type App struct {
Annotations []*KeyValue `protobuf:"bytes,5,rep,name=annotations" json:"annotations,omitempty"` Annotations []*KeyValue `protobuf:"bytes,5,rep,name=annotations" json:"annotations,omitempty"`
} }
func (m *App) Reset() { *m = App{} } func (m *App) Reset() { *m = App{} }
func (m *App) String() string { return proto.CompactTextString(m) } func (m *App) String() string { return proto.CompactTextString(m) }
func (*App) ProtoMessage() {} func (*App) ProtoMessage() {}
func (*App) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
func (m *App) GetImage() *Image { func (m *App) GetImage() *Image {
if m != nil { if m != nil {
@@ -327,9 +339,10 @@ type Pod struct {
Annotations []*KeyValue `protobuf:"bytes,7,rep,name=annotations" json:"annotations,omitempty"` Annotations []*KeyValue `protobuf:"bytes,7,rep,name=annotations" json:"annotations,omitempty"`
} }
func (m *Pod) Reset() { *m = Pod{} } func (m *Pod) Reset() { *m = Pod{} }
func (m *Pod) String() string { return proto.CompactTextString(m) } func (m *Pod) String() string { return proto.CompactTextString(m) }
func (*Pod) ProtoMessage() {} func (*Pod) ProtoMessage() {}
func (*Pod) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
func (m *Pod) GetApps() []*App { func (m *Pod) GetApps() []*App {
if m != nil { if m != nil {
@@ -354,14 +367,15 @@ func (m *Pod) GetAnnotations() []*KeyValue {
type KeyValue struct { type KeyValue struct {
// Key part of the key-value pair. // Key part of the key-value pair.
Key string `protobuf:"bytes,1,opt" json:"Key,omitempty"` Key string `protobuf:"bytes,1,opt,name=Key" json:"Key,omitempty"`
// Value part of the key-value pair. // Value part of the key-value pair.
Value string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` Value string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
} }
func (m *KeyValue) Reset() { *m = KeyValue{} } func (m *KeyValue) Reset() { *m = KeyValue{} }
func (m *KeyValue) String() string { return proto.CompactTextString(m) } func (m *KeyValue) String() string { return proto.CompactTextString(m) }
func (*KeyValue) ProtoMessage() {} func (*KeyValue) ProtoMessage() {}
func (*KeyValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
// PodFilter defines the condition that the returned pods need to satisfy in ListPods(). // PodFilter defines the condition that the returned pods need to satisfy in ListPods().
// The conditions are combined by 'AND', and different filters are combined by 'OR'. // The conditions are combined by 'AND', and different filters are combined by 'OR'.
@@ -380,9 +394,10 @@ type PodFilter struct {
Annotations []*KeyValue `protobuf:"bytes,6,rep,name=annotations" json:"annotations,omitempty"` Annotations []*KeyValue `protobuf:"bytes,6,rep,name=annotations" json:"annotations,omitempty"`
} }
func (m *PodFilter) Reset() { *m = PodFilter{} } func (m *PodFilter) Reset() { *m = PodFilter{} }
func (m *PodFilter) String() string { return proto.CompactTextString(m) } func (m *PodFilter) String() string { return proto.CompactTextString(m) }
func (*PodFilter) ProtoMessage() {} func (*PodFilter) ProtoMessage() {}
func (*PodFilter) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
func (m *PodFilter) GetAnnotations() []*KeyValue { func (m *PodFilter) GetAnnotations() []*KeyValue {
if m != nil { if m != nil {
@@ -413,11 +428,14 @@ type ImageFilter struct {
ImportedBefore int64 `protobuf:"varint,7,opt,name=imported_before" json:"imported_before,omitempty"` ImportedBefore int64 `protobuf:"varint,7,opt,name=imported_before" json:"imported_before,omitempty"`
// If not empty, the images that have all of the annotations will be returned. // If not empty, the images that have all of the annotations will be returned.
Annotations []*KeyValue `protobuf:"bytes,8,rep,name=annotations" json:"annotations,omitempty"` Annotations []*KeyValue `protobuf:"bytes,8,rep,name=annotations" json:"annotations,omitempty"`
// If not empty, the images that have any of the exact full names will be returned.
FullNames []string `protobuf:"bytes,9,rep,name=full_names" json:"full_names,omitempty"`
} }
func (m *ImageFilter) Reset() { *m = ImageFilter{} } func (m *ImageFilter) Reset() { *m = ImageFilter{} }
func (m *ImageFilter) String() string { return proto.CompactTextString(m) } func (m *ImageFilter) String() string { return proto.CompactTextString(m) }
func (*ImageFilter) ProtoMessage() {} func (*ImageFilter) ProtoMessage() {}
func (*ImageFilter) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
func (m *ImageFilter) GetLabels() []*KeyValue { func (m *ImageFilter) GetLabels() []*KeyValue {
if m != nil { if m != nil {
@@ -443,9 +461,10 @@ type Info struct {
ApiVersion string `protobuf:"bytes,3,opt,name=api_version" json:"api_version,omitempty"` ApiVersion string `protobuf:"bytes,3,opt,name=api_version" json:"api_version,omitempty"`
} }
func (m *Info) Reset() { *m = Info{} } func (m *Info) Reset() { *m = Info{} }
func (m *Info) String() string { return proto.CompactTextString(m) } func (m *Info) String() string { return proto.CompactTextString(m) }
func (*Info) ProtoMessage() {} func (*Info) ProtoMessage() {}
func (*Info) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} }
// Event describes the events that will be received via ListenEvents(). // Event describes the events that will be received via ListenEvents().
type Event struct { type Event struct {
@@ -466,9 +485,10 @@ type Event struct {
Data []*KeyValue `protobuf:"bytes,5,rep,name=data" json:"data,omitempty"` Data []*KeyValue `protobuf:"bytes,5,rep,name=data" json:"data,omitempty"`
} }
func (m *Event) Reset() { *m = Event{} } func (m *Event) Reset() { *m = Event{} }
func (m *Event) String() string { return proto.CompactTextString(m) } func (m *Event) String() string { return proto.CompactTextString(m) }
func (*Event) ProtoMessage() {} func (*Event) ProtoMessage() {}
func (*Event) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} }
func (m *Event) GetData() []*KeyValue { func (m *Event) GetData() []*KeyValue {
if m != nil { if m != nil {
@@ -495,26 +515,29 @@ type EventFilter struct {
UntilTime int64 `protobuf:"varint,5,opt,name=until_time" json:"until_time,omitempty"` UntilTime int64 `protobuf:"varint,5,opt,name=until_time" json:"until_time,omitempty"`
} }
func (m *EventFilter) Reset() { *m = EventFilter{} } func (m *EventFilter) Reset() { *m = EventFilter{} }
func (m *EventFilter) String() string { return proto.CompactTextString(m) } func (m *EventFilter) String() string { return proto.CompactTextString(m) }
func (*EventFilter) ProtoMessage() {} func (*EventFilter) ProtoMessage() {}
func (*EventFilter) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} }
// Request for GetInfo(). // Request for GetInfo().
type GetInfoRequest struct { type GetInfoRequest struct {
} }
func (m *GetInfoRequest) Reset() { *m = GetInfoRequest{} } func (m *GetInfoRequest) Reset() { *m = GetInfoRequest{} }
func (m *GetInfoRequest) String() string { return proto.CompactTextString(m) } func (m *GetInfoRequest) String() string { return proto.CompactTextString(m) }
func (*GetInfoRequest) ProtoMessage() {} func (*GetInfoRequest) ProtoMessage() {}
func (*GetInfoRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} }
// Response for GetInfo(). // Response for GetInfo().
type GetInfoResponse struct { type GetInfoResponse struct {
Info *Info `protobuf:"bytes,1,opt,name=info" json:"info,omitempty"` Info *Info `protobuf:"bytes,1,opt,name=info" json:"info,omitempty"`
} }
func (m *GetInfoResponse) Reset() { *m = GetInfoResponse{} } func (m *GetInfoResponse) Reset() { *m = GetInfoResponse{} }
func (m *GetInfoResponse) String() string { return proto.CompactTextString(m) } func (m *GetInfoResponse) String() string { return proto.CompactTextString(m) }
func (*GetInfoResponse) ProtoMessage() {} func (*GetInfoResponse) ProtoMessage() {}
func (*GetInfoResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} }
func (m *GetInfoResponse) GetInfo() *Info { func (m *GetInfoResponse) GetInfo() *Info {
if m != nil { if m != nil {
@@ -529,9 +552,10 @@ type ListPodsRequest struct {
Detail bool `protobuf:"varint,2,opt,name=detail" json:"detail,omitempty"` Detail bool `protobuf:"varint,2,opt,name=detail" json:"detail,omitempty"`
} }
func (m *ListPodsRequest) Reset() { *m = ListPodsRequest{} } func (m *ListPodsRequest) Reset() { *m = ListPodsRequest{} }
func (m *ListPodsRequest) String() string { return proto.CompactTextString(m) } func (m *ListPodsRequest) String() string { return proto.CompactTextString(m) }
func (*ListPodsRequest) ProtoMessage() {} func (*ListPodsRequest) ProtoMessage() {}
func (*ListPodsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} }
func (m *ListPodsRequest) GetFilters() []*PodFilter { func (m *ListPodsRequest) GetFilters() []*PodFilter {
if m != nil { if m != nil {
@@ -545,9 +569,10 @@ type ListPodsResponse struct {
Pods []*Pod `protobuf:"bytes,1,rep,name=pods" json:"pods,omitempty"` Pods []*Pod `protobuf:"bytes,1,rep,name=pods" json:"pods,omitempty"`
} }
func (m *ListPodsResponse) Reset() { *m = ListPodsResponse{} } func (m *ListPodsResponse) Reset() { *m = ListPodsResponse{} }
func (m *ListPodsResponse) String() string { return proto.CompactTextString(m) } func (m *ListPodsResponse) String() string { return proto.CompactTextString(m) }
func (*ListPodsResponse) ProtoMessage() {} func (*ListPodsResponse) ProtoMessage() {}
func (*ListPodsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} }
func (m *ListPodsResponse) GetPods() []*Pod { func (m *ListPodsResponse) GetPods() []*Pod {
if m != nil { if m != nil {
@@ -562,18 +587,20 @@ type InspectPodRequest struct {
Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"`
} }
func (m *InspectPodRequest) Reset() { *m = InspectPodRequest{} } func (m *InspectPodRequest) Reset() { *m = InspectPodRequest{} }
func (m *InspectPodRequest) String() string { return proto.CompactTextString(m) } func (m *InspectPodRequest) String() string { return proto.CompactTextString(m) }
func (*InspectPodRequest) ProtoMessage() {} func (*InspectPodRequest) ProtoMessage() {}
func (*InspectPodRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} }
// Response for InspectPod(). // Response for InspectPod().
type InspectPodResponse struct { type InspectPodResponse struct {
Pod *Pod `protobuf:"bytes,1,opt,name=pod" json:"pod,omitempty"` Pod *Pod `protobuf:"bytes,1,opt,name=pod" json:"pod,omitempty"`
} }
func (m *InspectPodResponse) Reset() { *m = InspectPodResponse{} } func (m *InspectPodResponse) Reset() { *m = InspectPodResponse{} }
func (m *InspectPodResponse) String() string { return proto.CompactTextString(m) } func (m *InspectPodResponse) String() string { return proto.CompactTextString(m) }
func (*InspectPodResponse) ProtoMessage() {} func (*InspectPodResponse) ProtoMessage() {}
func (*InspectPodResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} }
func (m *InspectPodResponse) GetPod() *Pod { func (m *InspectPodResponse) GetPod() *Pod {
if m != nil { if m != nil {
@@ -588,9 +615,10 @@ type ListImagesRequest struct {
Detail bool `protobuf:"varint,2,opt,name=detail" json:"detail,omitempty"` Detail bool `protobuf:"varint,2,opt,name=detail" json:"detail,omitempty"`
} }
func (m *ListImagesRequest) Reset() { *m = ListImagesRequest{} } func (m *ListImagesRequest) Reset() { *m = ListImagesRequest{} }
func (m *ListImagesRequest) String() string { return proto.CompactTextString(m) } func (m *ListImagesRequest) String() string { return proto.CompactTextString(m) }
func (*ListImagesRequest) ProtoMessage() {} func (*ListImagesRequest) ProtoMessage() {}
func (*ListImagesRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} }
func (m *ListImagesRequest) GetFilters() []*ImageFilter { func (m *ListImagesRequest) GetFilters() []*ImageFilter {
if m != nil { if m != nil {
@@ -604,9 +632,10 @@ type ListImagesResponse struct {
Images []*Image `protobuf:"bytes,1,rep,name=images" json:"images,omitempty"` Images []*Image `protobuf:"bytes,1,rep,name=images" json:"images,omitempty"`
} }
func (m *ListImagesResponse) Reset() { *m = ListImagesResponse{} } func (m *ListImagesResponse) Reset() { *m = ListImagesResponse{} }
func (m *ListImagesResponse) String() string { return proto.CompactTextString(m) } func (m *ListImagesResponse) String() string { return proto.CompactTextString(m) }
func (*ListImagesResponse) ProtoMessage() {} func (*ListImagesResponse) ProtoMessage() {}
func (*ListImagesResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} }
func (m *ListImagesResponse) GetImages() []*Image { func (m *ListImagesResponse) GetImages() []*Image {
if m != nil { if m != nil {
@@ -620,18 +649,20 @@ type InspectImageRequest struct {
Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"`
} }
func (m *InspectImageRequest) Reset() { *m = InspectImageRequest{} } func (m *InspectImageRequest) Reset() { *m = InspectImageRequest{} }
func (m *InspectImageRequest) String() string { return proto.CompactTextString(m) } func (m *InspectImageRequest) String() string { return proto.CompactTextString(m) }
func (*InspectImageRequest) ProtoMessage() {} func (*InspectImageRequest) ProtoMessage() {}
func (*InspectImageRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} }
// Response for InspectImage(). // Response for InspectImage().
type InspectImageResponse struct { type InspectImageResponse struct {
Image *Image `protobuf:"bytes,1,opt,name=image" json:"image,omitempty"` Image *Image `protobuf:"bytes,1,opt,name=image" json:"image,omitempty"`
} }
func (m *InspectImageResponse) Reset() { *m = InspectImageResponse{} } func (m *InspectImageResponse) Reset() { *m = InspectImageResponse{} }
func (m *InspectImageResponse) String() string { return proto.CompactTextString(m) } func (m *InspectImageResponse) String() string { return proto.CompactTextString(m) }
func (*InspectImageResponse) ProtoMessage() {} func (*InspectImageResponse) ProtoMessage() {}
func (*InspectImageResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} }
func (m *InspectImageResponse) GetImage() *Image { func (m *InspectImageResponse) GetImage() *Image {
if m != nil { if m != nil {
@@ -645,9 +676,10 @@ type ListenEventsRequest struct {
Filter *EventFilter `protobuf:"bytes,1,opt,name=filter" json:"filter,omitempty"` Filter *EventFilter `protobuf:"bytes,1,opt,name=filter" json:"filter,omitempty"`
} }
func (m *ListenEventsRequest) Reset() { *m = ListenEventsRequest{} } func (m *ListenEventsRequest) Reset() { *m = ListenEventsRequest{} }
func (m *ListenEventsRequest) String() string { return proto.CompactTextString(m) } func (m *ListenEventsRequest) String() string { return proto.CompactTextString(m) }
func (*ListenEventsRequest) ProtoMessage() {} func (*ListenEventsRequest) ProtoMessage() {}
func (*ListenEventsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} }
func (m *ListenEventsRequest) GetFilter() *EventFilter { func (m *ListenEventsRequest) GetFilter() *EventFilter {
if m != nil { if m != nil {
@@ -662,9 +694,10 @@ type ListenEventsResponse struct {
Events []*Event `protobuf:"bytes,1,rep,name=events" json:"events,omitempty"` Events []*Event `protobuf:"bytes,1,rep,name=events" json:"events,omitempty"`
} }
func (m *ListenEventsResponse) Reset() { *m = ListenEventsResponse{} } func (m *ListenEventsResponse) Reset() { *m = ListenEventsResponse{} }
func (m *ListenEventsResponse) String() string { return proto.CompactTextString(m) } func (m *ListenEventsResponse) String() string { return proto.CompactTextString(m) }
func (*ListenEventsResponse) ProtoMessage() {} func (*ListenEventsResponse) ProtoMessage() {}
func (*ListenEventsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} }
func (m *ListenEventsResponse) GetEvents() []*Event { func (m *ListenEventsResponse) GetEvents() []*Event {
if m != nil { if m != nil {
@@ -694,9 +727,10 @@ type GetLogsRequest struct {
UntilTime int64 `protobuf:"varint,6,opt,name=until_time" json:"until_time,omitempty"` UntilTime int64 `protobuf:"varint,6,opt,name=until_time" json:"until_time,omitempty"`
} }
func (m *GetLogsRequest) Reset() { *m = GetLogsRequest{} } func (m *GetLogsRequest) Reset() { *m = GetLogsRequest{} }
func (m *GetLogsRequest) String() string { return proto.CompactTextString(m) } func (m *GetLogsRequest) String() string { return proto.CompactTextString(m) }
func (*GetLogsRequest) ProtoMessage() {} func (*GetLogsRequest) ProtoMessage() {}
func (*GetLogsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} }
// Response for GetLogs(). // Response for GetLogs().
type GetLogsResponse struct { type GetLogsResponse struct {
@@ -704,11 +738,37 @@ type GetLogsResponse struct {
Lines []string `protobuf:"bytes,1,rep,name=lines" json:"lines,omitempty"` Lines []string `protobuf:"bytes,1,rep,name=lines" json:"lines,omitempty"`
} }
func (m *GetLogsResponse) Reset() { *m = GetLogsResponse{} } func (m *GetLogsResponse) Reset() { *m = GetLogsResponse{} }
func (m *GetLogsResponse) String() string { return proto.CompactTextString(m) } func (m *GetLogsResponse) String() string { return proto.CompactTextString(m) }
func (*GetLogsResponse) ProtoMessage() {} func (*GetLogsResponse) ProtoMessage() {}
func (*GetLogsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} }
func init() { func init() {
proto.RegisterType((*ImageFormat)(nil), "v1alpha.ImageFormat")
proto.RegisterType((*Image)(nil), "v1alpha.Image")
proto.RegisterType((*Network)(nil), "v1alpha.Network")
proto.RegisterType((*App)(nil), "v1alpha.App")
proto.RegisterType((*Pod)(nil), "v1alpha.Pod")
proto.RegisterType((*KeyValue)(nil), "v1alpha.KeyValue")
proto.RegisterType((*PodFilter)(nil), "v1alpha.PodFilter")
proto.RegisterType((*ImageFilter)(nil), "v1alpha.ImageFilter")
proto.RegisterType((*Info)(nil), "v1alpha.Info")
proto.RegisterType((*Event)(nil), "v1alpha.Event")
proto.RegisterType((*EventFilter)(nil), "v1alpha.EventFilter")
proto.RegisterType((*GetInfoRequest)(nil), "v1alpha.GetInfoRequest")
proto.RegisterType((*GetInfoResponse)(nil), "v1alpha.GetInfoResponse")
proto.RegisterType((*ListPodsRequest)(nil), "v1alpha.ListPodsRequest")
proto.RegisterType((*ListPodsResponse)(nil), "v1alpha.ListPodsResponse")
proto.RegisterType((*InspectPodRequest)(nil), "v1alpha.InspectPodRequest")
proto.RegisterType((*InspectPodResponse)(nil), "v1alpha.InspectPodResponse")
proto.RegisterType((*ListImagesRequest)(nil), "v1alpha.ListImagesRequest")
proto.RegisterType((*ListImagesResponse)(nil), "v1alpha.ListImagesResponse")
proto.RegisterType((*InspectImageRequest)(nil), "v1alpha.InspectImageRequest")
proto.RegisterType((*InspectImageResponse)(nil), "v1alpha.InspectImageResponse")
proto.RegisterType((*ListenEventsRequest)(nil), "v1alpha.ListenEventsRequest")
proto.RegisterType((*ListenEventsResponse)(nil), "v1alpha.ListenEventsResponse")
proto.RegisterType((*GetLogsRequest)(nil), "v1alpha.GetLogsRequest")
proto.RegisterType((*GetLogsResponse)(nil), "v1alpha.GetLogsResponse")
proto.RegisterEnum("v1alpha.ImageType", ImageType_name, ImageType_value) proto.RegisterEnum("v1alpha.ImageType", ImageType_name, ImageType_value)
proto.RegisterEnum("v1alpha.AppState", AppState_name, AppState_value) proto.RegisterEnum("v1alpha.AppState", AppState_name, AppState_value)
proto.RegisterEnum("v1alpha.PodState", PodState_name, PodState_value) proto.RegisterEnum("v1alpha.PodState", PodState_name, PodState_value)
@@ -890,9 +950,9 @@ func RegisterPublicAPIServer(s *grpc.Server, srv PublicAPIServer) {
s.RegisterService(&_PublicAPI_serviceDesc, srv) s.RegisterService(&_PublicAPI_serviceDesc, srv)
} }
func _PublicAPI_GetInfo_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) { func _PublicAPI_GetInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) {
in := new(GetInfoRequest) in := new(GetInfoRequest)
if err := codec.Unmarshal(buf, in); err != nil { if err := dec(in); err != nil {
return nil, err return nil, err
} }
out, err := srv.(PublicAPIServer).GetInfo(ctx, in) out, err := srv.(PublicAPIServer).GetInfo(ctx, in)
@@ -902,9 +962,9 @@ func _PublicAPI_GetInfo_Handler(srv interface{}, ctx context.Context, codec grpc
return out, nil return out, nil
} }
func _PublicAPI_ListPods_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) { func _PublicAPI_ListPods_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) {
in := new(ListPodsRequest) in := new(ListPodsRequest)
if err := codec.Unmarshal(buf, in); err != nil { if err := dec(in); err != nil {
return nil, err return nil, err
} }
out, err := srv.(PublicAPIServer).ListPods(ctx, in) out, err := srv.(PublicAPIServer).ListPods(ctx, in)
@@ -914,9 +974,9 @@ func _PublicAPI_ListPods_Handler(srv interface{}, ctx context.Context, codec grp
return out, nil return out, nil
} }
func _PublicAPI_InspectPod_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) { func _PublicAPI_InspectPod_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) {
in := new(InspectPodRequest) in := new(InspectPodRequest)
if err := codec.Unmarshal(buf, in); err != nil { if err := dec(in); err != nil {
return nil, err return nil, err
} }
out, err := srv.(PublicAPIServer).InspectPod(ctx, in) out, err := srv.(PublicAPIServer).InspectPod(ctx, in)
@@ -926,9 +986,9 @@ func _PublicAPI_InspectPod_Handler(srv interface{}, ctx context.Context, codec g
return out, nil return out, nil
} }
func _PublicAPI_ListImages_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) { func _PublicAPI_ListImages_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) {
in := new(ListImagesRequest) in := new(ListImagesRequest)
if err := codec.Unmarshal(buf, in); err != nil { if err := dec(in); err != nil {
return nil, err return nil, err
} }
out, err := srv.(PublicAPIServer).ListImages(ctx, in) out, err := srv.(PublicAPIServer).ListImages(ctx, in)
@@ -938,9 +998,9 @@ func _PublicAPI_ListImages_Handler(srv interface{}, ctx context.Context, codec g
return out, nil return out, nil
} }
func _PublicAPI_InspectImage_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) { func _PublicAPI_InspectImage_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) {
in := new(InspectImageRequest) in := new(InspectImageRequest)
if err := codec.Unmarshal(buf, in); err != nil { if err := dec(in); err != nil {
return nil, err return nil, err
} }
out, err := srv.(PublicAPIServer).InspectImage(ctx, in) out, err := srv.(PublicAPIServer).InspectImage(ctx, in)
@@ -1030,3 +1090,90 @@ var _PublicAPI_serviceDesc = grpc.ServiceDesc{
}, },
}, },
} }
var fileDescriptor0 = []byte{
// 1318 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x94, 0x57, 0x4d, 0x72, 0xdb, 0x46,
0x13, 0x35, 0x08, 0x82, 0x3f, 0x4d, 0x9a, 0x02, 0x47, 0xb2, 0x45, 0xd3, 0x7f, 0x32, 0xbe, 0x2f,
0x2e, 0x47, 0x0b, 0x25, 0x91, 0x1d, 0x6f, 0x52, 0x95, 0x32, 0x25, 0x41, 0x2a, 0xc6, 0x12, 0xc9,
0xa2, 0x69, 0x55, 0xbc, 0x42, 0x41, 0xe2, 0xd0, 0x41, 0x89, 0x04, 0x10, 0x02, 0x92, 0xad, 0x2c,
0x73, 0x81, 0x5c, 0x21, 0xb7, 0x48, 0x55, 0x36, 0xb9, 0x41, 0x6e, 0x91, 0x7b, 0xa4, 0x67, 0x30,
0x00, 0x06, 0x20, 0xb8, 0xc8, 0x8e, 0xe8, 0xee, 0x79, 0xfd, 0xba, 0x7b, 0xfa, 0x01, 0x84, 0xba,
0xed, 0x3b, 0x7b, 0xfe, 0xd2, 0x0b, 0x3d, 0x52, 0xbd, 0xf9, 0xc6, 0x9e, 0xfb, 0x3f, 0xd9, 0xc6,
0x1b, 0x68, 0xf4, 0x17, 0xf6, 0x47, 0x7a, 0xec, 0x2d, 0x17, 0x76, 0x48, 0x76, 0xa0, 0x1c, 0xde,
0xfa, 0xb4, 0xa3, 0xec, 0x28, 0x2f, 0x5a, 0xfb, 0x64, 0x4f, 0x84, 0xed, 0xf1, 0x98, 0x09, 0x7a,
0xc8, 0x06, 0x54, 0x6f, 0xe8, 0x32, 0x70, 0x3c, 0xb7, 0x53, 0xc2, 0xa0, 0xba, 0xf1, 0x97, 0x02,
0x1a, 0x77, 0x93, 0x2f, 0xa1, 0x71, 0x61, 0x07, 0xd4, 0x9a, 0x71, 0x2c, 0x8e, 0xd1, 0xd8, 0xdf,
0xca, 0x62, 0x88, 0x3c, 0x00, 0x25, 0x67, 0x1a, 0x01, 0x90, 0x26, 0x94, 0x5d, 0x7b, 0x41, 0x3b,
0x2a, 0x7f, 0x92, 0xf0, 0xcb, 0xdc, 0xd0, 0x01, 0xdd, 0x59, 0xf8, 0xde, 0x32, 0xb4, 0x42, 0x67,
0x41, 0x83, 0xd0, 0x5e, 0xf8, 0x1d, 0x0d, 0x3d, 0x2a, 0xd1, 0xa1, 0xb6, 0xb0, 0x5d, 0x67, 0x86,
0xc6, 0x4e, 0x05, 0x2d, 0x4d, 0x06, 0x15, 0x38, 0xbf, 0xd0, 0x4e, 0x95, 0xfb, 0x9f, 0x43, 0xc3,
0x76, 0x5d, 0x2f, 0xb4, 0x43, 0x44, 0x0b, 0x3a, 0xb5, 0x1d, 0x15, 0xf9, 0xb4, 0x13, 0x3e, 0x6f,
0xe9, 0xed, 0xb9, 0x3d, 0xbf, 0xa6, 0xc6, 0x4b, 0xa8, 0x0e, 0x68, 0xf8, 0xc9, 0x5b, 0x5e, 0x25,
0x5c, 0x94, 0x98, 0x99, 0xe3, 0xdf, 0xbc, 0x4a, 0x79, 0xe2, 0xd3, 0xeb, 0x88, 0xa7, 0xf1, 0x9b,
0x02, 0x6a, 0xcf, 0xf7, 0x73, 0x27, 0x1e, 0x83, 0xe6, 0xb0, 0x32, 0xf9, 0x91, 0xc6, 0x7e, 0x2b,
0x5b, 0x3c, 0xb6, 0x57, 0xc3, 0x02, 0xc2, 0xa8, 0xd6, 0x96, 0xc4, 0x05, 0x91, 0xde, 0x31, 0x07,
0x69, 0x43, 0x9d, 0x7e, 0x76, 0x42, 0xeb, 0xd2, 0x9b, 0x52, 0xde, 0x80, 0x76, 0xbe, 0x0c, 0x6d,
0x5d, 0x19, 0x7f, 0x22, 0xa3, 0x91, 0x37, 0x15, 0xbd, 0x8d, 0xf8, 0x34, 0x40, 0xf5, 0x45, 0xa3,
0xdb, 0xeb, 0xb3, 0xe3, 0xa9, 0x28, 0x7b, 0x17, 0xca, 0xb6, 0xef, 0x07, 0x98, 0x98, 0xe5, 0x68,
0xca, 0xf4, 0x88, 0x01, 0x35, 0x37, 0xea, 0x52, 0xcc, 0x41, 0x4f, 0xfc, 0x71, 0xfb, 0x56, 0x27,
0x92, 0x23, 0x5f, 0x5d, 0x47, 0xfe, 0x39, 0xd4, 0xe2, 0xdf, 0x8c, 0x34, 0xfe, 0x16, 0x15, 0xdc,
0x05, 0xed, 0x86, 0x59, 0xc5, 0x6d, 0xfb, 0x5d, 0x81, 0x3a, 0xd2, 0x3d, 0x76, 0xe6, 0x21, 0x5d,
0xb2, 0x48, 0x67, 0x1a, 0x60, 0xa4, 0x8a, 0x91, 0xcf, 0xa0, 0xc2, 0xcb, 0x0b, 0x30, 0x54, 0x2d,
0xae, 0xaf, 0xcd, 0x76, 0xc0, 0xb7, 0xd8, 0xc0, 0x02, 0xec, 0x02, 0x3b, 0x85, 0x26, 0x3e, 0x31,
0x8b, 0x01, 0x95, 0xb9, 0xe9, 0x1e, 0xdc, 0x15, 0x95, 0x8a, 0x48, 0x8d, 0x9b, 0x73, 0xa5, 0x54,
0xd6, 0x95, 0xf2, 0xb7, 0x12, 0xef, 0x54, 0x01, 0x49, 0xec, 0x90, 0xbf, 0xa4, 0x33, 0xe7, 0xb3,
0xa0, 0x59, 0x27, 0x38, 0x2f, 0xbe, 0x35, 0x32, 0x29, 0x8c, 0xba, 0xa2, 0xb7, 0xc8, 0x20, 0xe1,
0x84, 0xc5, 0xcd, 0xed, 0x0b, 0x3a, 0x5f, 0x3f, 0x7f, 0x72, 0x1f, 0x5a, 0xd1, 0xa2, 0xd0, 0xa9,
0x65, 0xcf, 0x30, 0x33, 0x1f, 0x81, 0x4a, 0xb6, 0x61, 0x23, 0xb1, 0x5f, 0x50, 0x5c, 0xce, 0xff,
0xba, 0x1f, 0xc7, 0x50, 0xee, 0xbb, 0x33, 0x8f, 0x6c, 0x42, 0x63, 0x79, 0x15, 0x5a, 0xf1, 0x7a,
0x46, 0xf3, 0xd9, 0x82, 0x26, 0xb6, 0xf4, 0xd2, 0xca, 0x88, 0x02, 0x0b, 0x45, 0xb1, 0x49, 0x8c,
0xd1, 0xca, 0x2c, 0x41, 0x33, 0x6f, 0xa8, 0xbb, 0x5e, 0x65, 0xb8, 0x97, 0xab, 0x4c, 0x4e, 0x1f,
0x66, 0x4b, 0x6f, 0x21, 0xf4, 0x01, 0x9f, 0x98, 0x0e, 0xf0, 0xdd, 0x50, 0xc9, 0x53, 0x28, 0x4f,
0xed, 0xd0, 0x5e, 0xbf, 0x14, 0x21, 0x34, 0x38, 0xaa, 0x98, 0xc5, 0x33, 0xd0, 0x58, 0xe6, 0x68,
0x1a, 0xc5, 0xa9, 0xc5, 0xb8, 0xa2, 0xe1, 0xe0, 0xed, 0x93, 0xe7, 0x82, 0xbc, 0x02, 0xc7, 0xbd,
0xa4, 0x96, 0x44, 0x01, 0x6d, 0xd7, 0x6e, 0xe8, 0xcc, 0x23, 0x1b, 0x57, 0x26, 0x43, 0x87, 0xd6,
0x09, 0x0d, 0x59, 0xd3, 0xc6, 0xf4, 0xe7, 0x6b, 0xdc, 0x06, 0x63, 0x0f, 0x36, 0x12, 0x4b, 0xe0,
0x63, 0xbb, 0x29, 0x79, 0x88, 0x7a, 0x82, 0xcf, 0x42, 0x27, 0xef, 0xa6, 0x52, 0x81, 0x46, 0xec,
0xf9, 0xc6, 0xa9, 0x13, 0x84, 0x78, 0x73, 0x03, 0x01, 0x41, 0xfe, 0x07, 0xd5, 0x19, 0xaf, 0x22,
0x62, 0xdf, 0x90, 0xd8, 0xa7, 0x1b, 0xd1, 0x82, 0xca, 0x94, 0x86, 0xb6, 0x33, 0xe7, 0xcd, 0xab,
0x61, 0x5e, 0x3d, 0xc5, 0x11, 0x89, 0x71, 0xcb, 0x7d, 0x6f, 0x1a, 0xa3, 0x34, 0x65, 0x14, 0xe3,
0x29, 0xb4, 0xfb, 0x6e, 0xe0, 0xd3, 0x4b, 0x76, 0x24, 0xce, 0x2c, 0x29, 0x8a, 0xf1, 0x15, 0x10,
0x39, 0x40, 0x40, 0x3e, 0x40, 0x9d, 0xf1, 0xa6, 0xa2, 0x94, 0x2c, 0xe2, 0x0f, 0xd0, 0x66, 0x0c,
0xf8, 0x46, 0x24, 0xb5, 0x7c, 0x91, 0xaf, 0x25, 0xff, 0x9a, 0x28, 0xae, 0xe6, 0x15, 0x10, 0x19,
0x4b, 0x24, 0x7f, 0x02, 0x15, 0xbe, 0xc2, 0x31, 0x56, 0x4e, 0x75, 0x8d, 0x67, 0xb0, 0x29, 0x28,
0xf3, 0xe7, 0xa2, 0xaa, 0xbe, 0x85, 0xad, 0x6c, 0x88, 0x80, 0x4e, 0xf4, 0x5c, 0x29, 0xd2, 0x73,
0xe3, 0x3b, 0xd8, 0x64, 0x7c, 0xa8, 0xcb, 0xaf, 0x4f, 0x52, 0xdd, 0xff, 0xa1, 0x12, 0x55, 0xb7,
0xf2, 0x0e, 0x94, 0xee, 0xa2, 0xf1, 0x1a, 0xb6, 0xb2, 0x87, 0xd3, 0x72, 0x28, 0xb7, 0xac, 0x94,
0xc3, 0x03, 0x8d, 0x5b, 0x7e, 0xb9, 0x4e, 0xbd, 0x8f, 0x49, 0x3e, 0x6c, 0x13, 0x76, 0xdf, 0x4a,
0x54, 0x1f, 0xe5, 0x23, 0x96, 0x39, 0xb1, 0x43, 0x78, 0x8f, 0xe7, 0x8e, 0xcb, 0xef, 0xb1, 0xf2,
0x42, 0x63, 0x07, 0x66, 0xde, 0x7c, 0xee, 0x7d, 0xe2, 0x77, 0xb8, 0x96, 0xbb, 0xd7, 0x5a, 0xc1,
0xbd, 0xe6, 0x52, 0x62, 0xec, 0xf0, 0x5b, 0x1c, 0xa5, 0x16, 0x6c, 0x13, 0x64, 0xae, 0x6f, 0xbb,
0x14, 0xea, 0xe9, 0xb7, 0x42, 0x07, 0xbb, 0x7a, 0xd6, 0x3b, 0x31, 0xad, 0xc9, 0x87, 0x91, 0x69,
0xbd, 0x1f, 0x1c, 0x99, 0xc7, 0xfd, 0x81, 0x79, 0xa4, 0xdf, 0x41, 0x7d, 0xd8, 0x90, 0x3c, 0xbd,
0xd1, 0xe8, 0x50, 0x57, 0x50, 0x77, 0xdb, 0x92, 0xf1, 0x68, 0x78, 0xf8, 0xd6, 0x1c, 0xeb, 0x25,
0x24, 0xd2, 0x92, 0xcc, 0xc3, 0xc3, 0xbe, 0xae, 0xee, 0x8e, 0xa0, 0x96, 0xbc, 0x32, 0xb7, 0x61,
0x13, 0x01, 0xac, 0x77, 0x93, 0xde, 0x24, 0x9b, 0x04, 0xf1, 0x52, 0xc7, 0xf8, 0xfd, 0x60, 0xd0,
0x1f, 0x9c, 0x60, 0x9a, 0x2d, 0xd0, 0x53, 0xb3, 0xf9, 0x63, 0x7f, 0x82, 0xc1, 0xa5, 0xdd, 0x7f,
0x14, 0xa8, 0x25, 0xef, 0x09, 0x84, 0x1c, 0x0d, 0x8f, 0x0a, 0x20, 0xf1, 0x6c, 0xea, 0x30, 0xcf,
0x0e, 0xc6, 0x1f, 0x86, 0x88, 0x98, 0x09, 0x1f, 0x8d, 0xcd, 0x51, 0x6f, 0xcc, 0x52, 0x95, 0x50,
0x92, 0x49, 0xde, 0x81, 0x30, 0x2a, 0x63, 0x96, 0xda, 0x63, 0x66, 0x65, 0xbc, 0x6d, 0x0f, 0x52,
0x73, 0xef, 0x60, 0x38, 0x46, 0x6a, 0xf1, 0x31, 0x5d, 0xcb, 0x25, 0x8f, 0x88, 0x57, 0xb2, 0x39,
0x8e, 0xcc, 0x53, 0x73, 0xc2, 0xc0, 0xaa, 0xd9, 0x1c, 0x27, 0xbd, 0xf1, 0x01, 0xb6, 0x50, 0xaf,
0xed, 0xfe, 0x51, 0x82, 0x7a, 0x2a, 0x76, 0x38, 0x21, 0xf3, 0xdc, 0x1c, 0x4c, 0x56, 0x27, 0xf4,
0x10, 0xb6, 0x25, 0x0f, 0x43, 0x4a, 0xf8, 0x2b, 0xf8, 0x2d, 0xf0, 0xa4, 0xd8, 0x19, 0xb3, 0xc6,
0xda, 0xbb, 0x70, 0x3f, 0x17, 0x83, 0x54, 0xb8, 0x4f, 0x45, 0xb9, 0xb8, 0x97, 0xf3, 0x89, 0x72,
0xca, 0xb8, 0x3b, 0x3b, 0x39, 0x97, 0xe0, 0x6e, 0x1d, 0x0e, 0x4f, 0x4f, 0xcd, 0x43, 0x16, 0xa5,
0xe5, 0xc0, 0xc5, 0x38, 0xc7, 0x51, 0x43, 0xb2, 0xe0, 0xcc, 0x27, 0xc0, 0xab, 0xac, 0xc1, 0x92,
0x2b, 0xba, 0x55, 0xfd, 0xb3, 0x51, 0x44, 0xb9, 0x46, 0x1e, 0x41, 0x67, 0xc5, 0x3d, 0x36, 0xcf,
0x86, 0xe7, 0xe8, 0xad, 0xef, 0xff, 0x5a, 0xc6, 0x4f, 0x8f, 0xeb, 0x8b, 0xb9, 0x73, 0xd9, 0x1b,
0xf5, 0xc9, 0xf7, 0x50, 0x15, 0x82, 0x4e, 0xb6, 0x93, 0x05, 0xcd, 0x8a, 0x7e, 0xb7, 0xb3, 0xea,
0x88, 0xb6, 0xc6, 0xb8, 0x43, 0x7a, 0x50, 0x8b, 0x85, 0x99, 0xa4, 0x71, 0x39, 0xcd, 0xef, 0x3e,
0x28, 0xf0, 0x24, 0x10, 0x27, 0x00, 0xa9, 0x14, 0x93, 0xae, 0xf4, 0x02, 0xc9, 0x09, 0x78, 0xf7,
0x61, 0xa1, 0x4f, 0x06, 0x4a, 0x65, 0x55, 0x02, 0x5a, 0xd1, 0x6d, 0x09, 0x68, 0x55, 0x87, 0x11,
0xe8, 0x0c, 0x9a, 0xb2, 0x8c, 0x92, 0x47, 0xf9, 0xbc, 0xb2, 0x00, 0x77, 0x1f, 0xaf, 0xf1, 0x26,
0x70, 0x43, 0x68, 0xca, 0x0a, 0x29, 0xc1, 0x15, 0xa8, 0xae, 0x04, 0x57, 0x24, 0xab, 0xc6, 0x9d,
0xaf, 0x15, 0xf2, 0x86, 0x0f, 0x8d, 0xe9, 0x57, 0x76, 0x68, 0x92, 0x98, 0x66, 0x87, 0x26, 0x4b,
0x1d, 0x43, 0xb8, 0xa8, 0xf0, 0xff, 0x4f, 0x2f, 0xff, 0x0d, 0x00, 0x00, 0xff, 0xff, 0x84, 0xa0,
0x2b, 0xe3, 0x4c, 0x0d, 0x00, 0x00,
}

View File

@@ -21,8 +21,6 @@
// The API defined here is proposed, experimental, // // The API defined here is proposed, experimental, //
// and (for now) subject to change at any time. // // and (for now) subject to change at any time. //
// // // //
// Do not use it. //
// //
// If you think you want to use it, or for any other // // If you think you want to use it, or for any other //
// queries, contact <rkt-dev@googlegroups.com> // // queries, contact <rkt-dev@googlegroups.com> //
// or file an issue on github.com/coreos/rkt // // or file an issue on github.com/coreos/rkt //
@@ -234,6 +232,9 @@ message ImageFilter {
// If not empty, the images that have all of the annotations will be returned. // If not empty, the images that have all of the annotations will be returned.
repeated KeyValue annotations = 8; repeated KeyValue annotations = 8;
// If not empty, the images that have any of the exact full names will be returned.
repeated string full_names = 9;
} }
// Info describes the information of rkt on the machine. // Info describes the information of rkt on the machine.

View File

@@ -20,8 +20,6 @@ import (
"fmt" "fmt"
"os" "os"
// Note that if your project uses Godep to manage dependencies, then
// you need to change following the import paths.
"github.com/coreos/rkt/api/v1alpha" "github.com/coreos/rkt/api/v1alpha"
"golang.org/x/net/context" "golang.org/x/net/context"
"google.golang.org/grpc" "google.golang.org/grpc"

View File

@@ -320,6 +320,7 @@ func (s *Connection) Serve(newHandler StreamHandler) {
partitionRoundRobin int partitionRoundRobin int
goAwayFrame *spdy.GoAwayFrame goAwayFrame *spdy.GoAwayFrame
) )
Loop:
for { for {
readFrame, err := s.framer.ReadFrame() readFrame, err := s.framer.ReadFrame()
if err != nil { if err != nil {
@@ -362,7 +363,7 @@ func (s *Connection) Serve(newHandler StreamHandler) {
case *spdy.GoAwayFrame: case *spdy.GoAwayFrame:
// hold on to the go away frame and exit the loop // hold on to the go away frame and exit the loop
goAwayFrame = frame goAwayFrame = frame
break break Loop
default: default:
priority = 7 priority = 7
partition = partitionRoundRobin partition = partitionRoundRobin

View File

@@ -3,8 +3,8 @@ sudo: required
go: go:
- 1.3.3 - 1.3.3
- 1.4.2 - 1.4.2
- 1.5.2 - 1.5.3
- 1.6beta1 - 1.6rc2
- tip - tip
env: env:
- GOARCH=amd64 DOCKER_VERSION=1.7.1 - GOARCH=amd64 DOCKER_VERSION=1.7.1
@@ -13,6 +13,8 @@ env:
- GOARCH=386 DOCKER_VERSION=1.8.3 - GOARCH=386 DOCKER_VERSION=1.8.3
- GOARCH=amd64 DOCKER_VERSION=1.9.1 - GOARCH=amd64 DOCKER_VERSION=1.9.1
- GOARCH=386 DOCKER_VERSION=1.9.1 - GOARCH=386 DOCKER_VERSION=1.9.1
- GOARCH=amd64 DOCKER_VERSION=1.10.0
- GOARCH=386 DOCKER_VERSION=1.10.0
install: install:
- make prepare_docker - make prepare_docker
script: script:

View File

@@ -12,6 +12,7 @@ Antonio Murdaca <runcom@redhat.com>
Artem Sidorenko <artem@2realities.com> Artem Sidorenko <artem@2realities.com>
Ben Marini <ben@remind101.com> Ben Marini <ben@remind101.com>
Ben McCann <benmccann.com> Ben McCann <benmccann.com>
Ben Parees <bparees@redhat.com>
Benno van den Berg <bennovandenberg@gmail.com> Benno van den Berg <bennovandenberg@gmail.com>
Brendan Fosberry <brendan@codeship.com> Brendan Fosberry <brendan@codeship.com>
Brian Lalor <blalor@bravo5.org> Brian Lalor <blalor@bravo5.org>
@@ -37,6 +38,7 @@ Dave Choi <dave.choi@daumkakao.com>
David Huie <dahuie@gmail.com> David Huie <dahuie@gmail.com>
Dawn Chen <dawnchen@google.com> Dawn Chen <dawnchen@google.com>
Dinesh Subhraveti <dinesh@gemini-systems.net> Dinesh Subhraveti <dinesh@gemini-systems.net>
Drew Wells <drew.wells00@gmail.com>
Ed <edrocksit@gmail.com> Ed <edrocksit@gmail.com>
Elias G. Schneevoigt <eliasgs@gmail.com> Elias G. Schneevoigt <eliasgs@gmail.com>
Erez Horev <erez.horev@elastifile.com> Erez Horev <erez.horev@elastifile.com>
@@ -48,6 +50,7 @@ Flavia Missi <flaviamissi@gmail.com>
Francisco Souza <f@souza.cc> Francisco Souza <f@souza.cc>
Grégoire Delattre <gregoire.delattre@gmail.com> Grégoire Delattre <gregoire.delattre@gmail.com>
Guillermo Álvarez Fernández <guillermo@cientifico.net> Guillermo Álvarez Fernández <guillermo@cientifico.net>
Harry Zhang <harryzhang@zju.edu.cn>
He Simei <hesimei@zju.edu.cn> He Simei <hesimei@zju.edu.cn>
Ivan Mikushin <i.mikushin@gmail.com> Ivan Mikushin <i.mikushin@gmail.com>
James Bardin <jbardin@litl.com> James Bardin <jbardin@litl.com>
@@ -109,6 +112,7 @@ Summer Mousa <smousa@zenoss.com>
Sunjin Lee <styner32@gmail.com> Sunjin Lee <styner32@gmail.com>
Tarsis Azevedo <tarsis@corp.globo.com> Tarsis Azevedo <tarsis@corp.globo.com>
Tim Schindler <tim@catalyst-zero.com> Tim Schindler <tim@catalyst-zero.com>
Timothy St. Clair <tstclair@redhat.com>
Tobi Knaup <tobi@mesosphere.io> Tobi Knaup <tobi@mesosphere.io>
Tom Wilkie <tom.wilkie@gmail.com> Tom Wilkie <tom.wilkie@gmail.com>
Tonic <tonicbupt@gmail.com> Tonic <tonicbupt@gmail.com>

View File

@@ -41,7 +41,7 @@ prepare_docker:
sudo apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D sudo apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D
echo "deb https://apt.dockerproject.org/repo ubuntu-trusty main" | sudo tee /etc/apt/sources.list.d/docker.list echo "deb https://apt.dockerproject.org/repo ubuntu-trusty main" | sudo tee /etc/apt/sources.list.d/docker.list
sudo apt-get update sudo apt-get update
sudo apt-get install docker-engine=$(DOCKER_VERSION)-0~$(shell lsb_release -cs) -y --force-yes sudo apt-get install docker-engine=$(DOCKER_VERSION)-0~$(shell lsb_release -cs) -y --force-yes -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold"
pretest: lint vet fmtcheck pretest: lint vet fmtcheck

View File

@@ -60,7 +60,8 @@ func NewAPIVersion(input string) (APIVersion, error) {
if !strings.Contains(input, ".") { if !strings.Contains(input, ".") {
return nil, fmt.Errorf("Unable to parse version %q", input) return nil, fmt.Errorf("Unable to parse version %q", input)
} }
arr := strings.Split(input, ".") raw := strings.Split(input, "-")
arr := strings.Split(raw[0], ".")
ret := make(APIVersion, len(arr)) ret := make(APIVersion, len(arr))
var err error var err error
for i, val := range arr { for i, val := range arr {
@@ -586,7 +587,7 @@ func (c *Client) hijack(method, path string, hijackOptions hijackOptions) (Close
if err != nil { if err != nil {
return nil, err return nil, err
} }
req.Header.Set("Content-Type", "plain/text") req.Header.Set("Content-Type", "application/json")
req.Header.Set("Connection", "Upgrade") req.Header.Set("Connection", "Upgrade")
req.Header.Set("Upgrade", "tcp") req.Header.Set("Upgrade", "tcp")
protocol := c.endpointURL.Scheme protocol := c.endpointURL.Scheme

View File

@@ -510,6 +510,7 @@ type HostConfig struct {
BlkioWeight int64 `json:"BlkioWeight,omitempty" yaml:"BlkioWeight"` BlkioWeight int64 `json:"BlkioWeight,omitempty" yaml:"BlkioWeight"`
Ulimits []ULimit `json:"Ulimits,omitempty" yaml:"Ulimits,omitempty"` Ulimits []ULimit `json:"Ulimits,omitempty" yaml:"Ulimits,omitempty"`
VolumeDriver string `json:"VolumeDriver,omitempty" yaml:"VolumeDriver,omitempty"` VolumeDriver string `json:"VolumeDriver,omitempty" yaml:"VolumeDriver,omitempty"`
OomScoreAdj int `json:"OomScoreAdj,omitempty" yaml:"OomScoreAdj,omitempty"`
} }
// StartContainer starts a container, returning an error in case of failure. // StartContainer starts a container, returning an error in case of failure.
@@ -638,6 +639,7 @@ func (c *Client) TopContainer(id string, psArgs string) (TopResult, error) {
// See https://goo.gl/GNmLHb for more details. // See https://goo.gl/GNmLHb for more details.
type Stats struct { type Stats struct {
Read time.Time `json:"read,omitempty" yaml:"read,omitempty"` Read time.Time `json:"read,omitempty" yaml:"read,omitempty"`
Network NetworkStats `json:"network,omitempty" yaml:"network,omitempty"`
Networks map[string]NetworkStats `json:"networks,omitempty" yaml:"networks,omitempty"` Networks map[string]NetworkStats `json:"networks,omitempty" yaml:"networks,omitempty"`
MemoryStats struct { MemoryStats struct {
Stats struct { Stats struct {
@@ -670,6 +672,8 @@ type Stats struct {
Pgfault uint64 `json:"pgfault,omitempty" yaml:"pgfault,omitempty"` Pgfault uint64 `json:"pgfault,omitempty" yaml:"pgfault,omitempty"`
InactiveFile uint64 `json:"inactive_file,omitempty" yaml:"inactive_file,omitempty"` InactiveFile uint64 `json:"inactive_file,omitempty" yaml:"inactive_file,omitempty"`
TotalPgpgin uint64 `json:"total_pgpgin,omitempty" yaml:"total_pgpgin,omitempty"` TotalPgpgin uint64 `json:"total_pgpgin,omitempty" yaml:"total_pgpgin,omitempty"`
HierarchicalMemswLimit uint64 `json:"hierarchical_memsw_limit,omitempty" yaml:"hierarchical_memsw_limit,omitempty"`
Swap uint64 `json:"swap,omitempty" yaml:"swap,omitempty"`
} `json:"stats,omitempty" yaml:"stats,omitempty"` } `json:"stats,omitempty" yaml:"stats,omitempty"`
MaxUsage uint64 `json:"max_usage,omitempty" yaml:"max_usage,omitempty"` MaxUsage uint64 `json:"max_usage,omitempty" yaml:"max_usage,omitempty"`
Usage uint64 `json:"usage,omitempty" yaml:"usage,omitempty"` Usage uint64 `json:"usage,omitempty" yaml:"usage,omitempty"`

View File

@@ -410,6 +410,8 @@ type BuildImageOptions struct {
Memory int64 `qs:"memory"` Memory int64 `qs:"memory"`
Memswap int64 `qs:"memswap"` Memswap int64 `qs:"memswap"`
CPUShares int64 `qs:"cpushares"` CPUShares int64 `qs:"cpushares"`
CPUQuota int64 `qs:"cpuquota"`
CPUPeriod int64 `qs:"cpuperiod"`
CPUSetCPUs string `qs:"cpusetcpus"` CPUSetCPUs string `qs:"cpusetcpus"`
InputStream io.Reader `qs:"-"` InputStream io.Reader `qs:"-"`
OutputStream io.Writer `qs:"-"` OutputStream io.Writer `qs:"-"`

View File

@@ -17,19 +17,20 @@ var ErrNetworkAlreadyExists = errors.New("network already exists")
// Network represents a network. // Network represents a network.
// //
// See https://goo.gl/1kmPKZ for more details. // See https://goo.gl/6GugX3 for more details.
type Network struct { type Network struct {
Name string Name string
ID string `json:"Id"` ID string `json:"Id"`
Scope string Scope string
Driver string Driver string
IPAM IPAMOptions
Containers map[string]Endpoint Containers map[string]Endpoint
Options map[string]string Options map[string]string
} }
// Endpoint contains network resources allocated and used for a container in a network // Endpoint contains network resources allocated and used for a container in a network
// //
// See https://goo.gl/1kmPKZ for more details. // See https://goo.gl/6GugX3 for more details.
type Endpoint struct { type Endpoint struct {
Name string Name string
ID string `json:"EndpointID"` ID string `json:"EndpointID"`
@@ -40,7 +41,7 @@ type Endpoint struct {
// ListNetworks returns all networks. // ListNetworks returns all networks.
// //
// See https://goo.gl/1kmPKZ for more details. // See https://goo.gl/6GugX3 for more details.
func (c *Client) ListNetworks() ([]Network, error) { func (c *Client) ListNetworks() ([]Network, error) {
resp, err := c.do("GET", "/networks", doOptions{}) resp, err := c.do("GET", "/networks", doOptions{})
if err != nil { if err != nil {
@@ -56,7 +57,7 @@ func (c *Client) ListNetworks() ([]Network, error) {
// NetworkInfo returns information about a network by its ID. // NetworkInfo returns information about a network by its ID.
// //
// See https://goo.gl/1kmPKZ for more details. // See https://goo.gl/6GugX3 for more details.
func (c *Client) NetworkInfo(id string) (*Network, error) { func (c *Client) NetworkInfo(id string) (*Network, error) {
path := "/networks/" + id path := "/networks/" + id
resp, err := c.do("GET", path, doOptions{}) resp, err := c.do("GET", path, doOptions{})
@@ -77,7 +78,7 @@ func (c *Client) NetworkInfo(id string) (*Network, error) {
// CreateNetworkOptions specify parameters to the CreateNetwork function and // CreateNetworkOptions specify parameters to the CreateNetwork function and
// (for now) is the expected body of the "create network" http request message // (for now) is the expected body of the "create network" http request message
// //
// See https://goo.gl/1kmPKZ for more details. // See https://goo.gl/6GugX3 for more details.
type CreateNetworkOptions struct { type CreateNetworkOptions struct {
Name string `json:"Name"` Name string `json:"Name"`
CheckDuplicate bool `json:"CheckDuplicate"` CheckDuplicate bool `json:"CheckDuplicate"`
@@ -107,7 +108,7 @@ type IPAMConfig struct {
// CreateNetwork creates a new network, returning the network instance, // CreateNetwork creates a new network, returning the network instance,
// or an error in case of failure. // or an error in case of failure.
// //
// See https://goo.gl/1kmPKZ for more details. // See https://goo.gl/6GugX3 for more details.
func (c *Client) CreateNetwork(opts CreateNetworkOptions) (*Network, error) { func (c *Client) CreateNetwork(opts CreateNetworkOptions) (*Network, error) {
resp, err := c.do( resp, err := c.do(
"POST", "POST",
@@ -144,7 +145,7 @@ func (c *Client) CreateNetwork(opts CreateNetworkOptions) (*Network, error) {
// RemoveNetwork removes a network or returns an error in case of failure. // RemoveNetwork removes a network or returns an error in case of failure.
// //
// See https://goo.gl/1kmPKZ for more details. // See https://goo.gl/6GugX3 for more details.
func (c *Client) RemoveNetwork(id string) error { func (c *Client) RemoveNetwork(id string) error {
resp, err := c.do("DELETE", "/networks/"+id, doOptions{}) resp, err := c.do("DELETE", "/networks/"+id, doOptions{})
if err != nil { if err != nil {
@@ -159,14 +160,14 @@ func (c *Client) RemoveNetwork(id string) error {
// NetworkConnectionOptions specify parameters to the ConnectNetwork and DisconnectNetwork function. // NetworkConnectionOptions specify parameters to the ConnectNetwork and DisconnectNetwork function.
// //
// See https://goo.gl/1kmPKZ for more details. // See https://goo.gl/6GugX3 for more details.
type NetworkConnectionOptions struct { type NetworkConnectionOptions struct {
Container string Container string
} }
// ConnectNetwork adds a container to a network or returns an error in case of failure. // ConnectNetwork adds a container to a network or returns an error in case of failure.
// //
// See https://goo.gl/1kmPKZ for more details. // See https://goo.gl/6GugX3 for more details.
func (c *Client) ConnectNetwork(id string, opts NetworkConnectionOptions) error { func (c *Client) ConnectNetwork(id string, opts NetworkConnectionOptions) error {
resp, err := c.do("POST", "/networks/"+id+"/connect", doOptions{data: opts}) resp, err := c.do("POST", "/networks/"+id+"/connect", doOptions{data: opts})
if err != nil { if err != nil {
@@ -181,7 +182,7 @@ func (c *Client) ConnectNetwork(id string, opts NetworkConnectionOptions) error
// DisconnectNetwork removes a container from a network or returns an error in case of failure. // DisconnectNetwork removes a container from a network or returns an error in case of failure.
// //
// See https://goo.gl/1kmPKZ for more details. // See https://goo.gl/6GugX3 for more details.
func (c *Client) DisconnectNetwork(id string, opts NetworkConnectionOptions) error { func (c *Client) DisconnectNetwork(id string, opts NetworkConnectionOptions) error {
resp, err := c.do("POST", "/networks/"+id+"/disconnect", doOptions{data: opts}) resp, err := c.do("POST", "/networks/"+id+"/disconnect", doOptions{data: opts})
if err != nil { if err != nil {

View File

@@ -38,6 +38,7 @@ var nameRegexp = regexp.MustCompile(`^[a-zA-Z0-9][a-zA-Z0-9_.-]+$`)
// For more details on the remote API, check http://goo.gl/G3plxW. // For more details on the remote API, check http://goo.gl/G3plxW.
type DockerServer struct { type DockerServer struct {
containers []*docker.Container containers []*docker.Container
uploadedFiles map[string]string
execs []*docker.ExecInspect execs []*docker.ExecInspect
execMut sync.RWMutex execMut sync.RWMutex
cMut sync.RWMutex cMut sync.RWMutex
@@ -89,6 +90,7 @@ func NewServer(bind string, containerChan chan<- *docker.Container, hook func(*h
execCallbacks: make(map[string]func()), execCallbacks: make(map[string]func()),
statsCallbacks: make(map[string]func(string) docker.Stats), statsCallbacks: make(map[string]func(string) docker.Stats),
customHandlers: make(map[string]http.Handler), customHandlers: make(map[string]http.Handler),
uploadedFiles: make(map[string]string),
cChan: containerChan, cChan: containerChan,
} }
server.buildMuxer() server.buildMuxer()
@@ -120,6 +122,7 @@ func (s *DockerServer) buildMuxer() {
s.mux.Path("/containers/{id:.*}").Methods("DELETE").HandlerFunc(s.handlerWrapper(s.removeContainer)) s.mux.Path("/containers/{id:.*}").Methods("DELETE").HandlerFunc(s.handlerWrapper(s.removeContainer))
s.mux.Path("/containers/{id:.*}/exec").Methods("POST").HandlerFunc(s.handlerWrapper(s.createExecContainer)) s.mux.Path("/containers/{id:.*}/exec").Methods("POST").HandlerFunc(s.handlerWrapper(s.createExecContainer))
s.mux.Path("/containers/{id:.*}/stats").Methods("GET").HandlerFunc(s.handlerWrapper(s.statsContainer)) s.mux.Path("/containers/{id:.*}/stats").Methods("GET").HandlerFunc(s.handlerWrapper(s.statsContainer))
s.mux.Path("/containers/{id:.*}/archive").Methods("PUT").HandlerFunc(s.handlerWrapper(s.uploadToContainer))
s.mux.Path("/exec/{id:.*}/resize").Methods("POST").HandlerFunc(s.handlerWrapper(s.resizeExecContainer)) s.mux.Path("/exec/{id:.*}/resize").Methods("POST").HandlerFunc(s.handlerWrapper(s.resizeExecContainer))
s.mux.Path("/exec/{id:.*}/start").Methods("POST").HandlerFunc(s.handlerWrapper(s.startExecContainer)) s.mux.Path("/exec/{id:.*}/start").Methods("POST").HandlerFunc(s.handlerWrapper(s.startExecContainer))
s.mux.Path("/exec/{id:.*}/json").Methods("GET").HandlerFunc(s.handlerWrapper(s.inspectExecContainer)) s.mux.Path("/exec/{id:.*}/json").Methods("GET").HandlerFunc(s.handlerWrapper(s.inspectExecContainer))
@@ -440,8 +443,8 @@ func (s *DockerServer) createContainer(w http.ResponseWriter, r *http.Request) {
s.cMut.Unlock() s.cMut.Unlock()
w.WriteHeader(http.StatusCreated) w.WriteHeader(http.StatusCreated)
s.notify(&container) s.notify(&container)
var c = struct{ ID string }{ID: container.ID}
json.NewEncoder(w).Encode(c) json.NewEncoder(w).Encode(container)
} }
func (s *DockerServer) generateID() string { func (s *DockerServer) generateID() string {
@@ -503,6 +506,23 @@ func (s *DockerServer) statsContainer(w http.ResponseWriter, r *http.Request) {
} }
} }
func (s *DockerServer) uploadToContainer(w http.ResponseWriter, r *http.Request) {
id := mux.Vars(r)["id"]
container, _, err := s.findContainer(id)
if err != nil {
http.Error(w, err.Error(), http.StatusNotFound)
return
}
if !container.State.Running {
w.WriteHeader(http.StatusInternalServerError)
fmt.Fprintf(w, "Container %s is not running", id)
return
}
path := r.URL.Query().Get("path")
s.uploadedFiles[id] = path
w.WriteHeader(http.StatusOK)
}
func (s *DockerServer) topContainer(w http.ResponseWriter, r *http.Request) { func (s *DockerServer) topContainer(w http.ResponseWriter, r *http.Request) {
id := mux.Vars(r)["id"] id := mux.Vars(r)["id"]
container, _, err := s.findContainer(id) container, _, err := s.findContainer(id)

View File

@@ -230,6 +230,14 @@ var E_GogoprotoImport = &proto.ExtensionDesc{
Tag: "varint,63027,opt,name=gogoproto_import", Tag: "varint,63027,opt,name=gogoproto_import",
} }
var E_ProtosizerAll = &proto.ExtensionDesc{
ExtendedType: (*google_protobuf.FileOptions)(nil),
ExtensionType: (*bool)(nil),
Field: 63028,
Name: "gogoproto.protosizer_all",
Tag: "varint,63028,opt,name=protosizer_all",
}
var E_GoprotoGetters = &proto.ExtensionDesc{ var E_GoprotoGetters = &proto.ExtensionDesc{
ExtendedType: (*google_protobuf.MessageOptions)(nil), ExtendedType: (*google_protobuf.MessageOptions)(nil),
ExtensionType: (*bool)(nil), ExtensionType: (*bool)(nil),
@@ -382,6 +390,14 @@ var E_GoprotoUnrecognized = &proto.ExtensionDesc{
Tag: "varint,64026,opt,name=goproto_unrecognized", Tag: "varint,64026,opt,name=goproto_unrecognized",
} }
var E_Protosizer = &proto.ExtensionDesc{
ExtendedType: (*google_protobuf.MessageOptions)(nil),
ExtensionType: (*bool)(nil),
Field: 64028,
Name: "gogoproto.protosizer",
Tag: "varint,64028,opt,name=protosizer",
}
var E_Nullable = &proto.ExtensionDesc{ var E_Nullable = &proto.ExtensionDesc{
ExtendedType: (*google_protobuf.FieldOptions)(nil), ExtendedType: (*google_protobuf.FieldOptions)(nil),
ExtensionType: (*bool)(nil), ExtensionType: (*bool)(nil),
@@ -481,6 +497,7 @@ func init() {
proto.RegisterExtension(E_GoprotoExtensionsMapAll) proto.RegisterExtension(E_GoprotoExtensionsMapAll)
proto.RegisterExtension(E_GoprotoUnrecognizedAll) proto.RegisterExtension(E_GoprotoUnrecognizedAll)
proto.RegisterExtension(E_GogoprotoImport) proto.RegisterExtension(E_GogoprotoImport)
proto.RegisterExtension(E_ProtosizerAll)
proto.RegisterExtension(E_GoprotoGetters) proto.RegisterExtension(E_GoprotoGetters)
proto.RegisterExtension(E_GoprotoStringer) proto.RegisterExtension(E_GoprotoStringer)
proto.RegisterExtension(E_VerboseEqual) proto.RegisterExtension(E_VerboseEqual)
@@ -500,6 +517,7 @@ func init() {
proto.RegisterExtension(E_UnsafeUnmarshaler) proto.RegisterExtension(E_UnsafeUnmarshaler)
proto.RegisterExtension(E_GoprotoExtensionsMap) proto.RegisterExtension(E_GoprotoExtensionsMap)
proto.RegisterExtension(E_GoprotoUnrecognized) proto.RegisterExtension(E_GoprotoUnrecognized)
proto.RegisterExtension(E_Protosizer)
proto.RegisterExtension(E_Nullable) proto.RegisterExtension(E_Nullable)
proto.RegisterExtension(E_Embed) proto.RegisterExtension(E_Embed)
proto.RegisterExtension(E_Customtype) proto.RegisterExtension(E_Customtype)

View File

@@ -67,6 +67,8 @@ extend google.protobuf.FileOptions {
optional bool goproto_extensions_map_all = 63025; optional bool goproto_extensions_map_all = 63025;
optional bool goproto_unrecognized_all = 63026; optional bool goproto_unrecognized_all = 63026;
optional bool gogoproto_import = 63027; optional bool gogoproto_import = 63027;
optional bool protosizer_all = 63028;
} }
extend google.protobuf.MessageOptions { extend google.protobuf.MessageOptions {
@@ -93,6 +95,8 @@ extend google.protobuf.MessageOptions {
optional bool goproto_extensions_map = 64025; optional bool goproto_extensions_map = 64025;
optional bool goproto_unrecognized = 64026; optional bool goproto_unrecognized = 64026;
optional bool protosizer = 64028;
} }
extend google.protobuf.FieldOptions { extend google.protobuf.FieldOptions {

View File

@@ -213,6 +213,10 @@ func IsSizer(file *google_protobuf.FileDescriptorProto, message *google_protobuf
return proto.GetBoolExtension(message.Options, E_Sizer, proto.GetBoolExtension(file.Options, E_SizerAll, false)) return proto.GetBoolExtension(message.Options, E_Sizer, proto.GetBoolExtension(file.Options, E_SizerAll, false))
} }
func IsProtoSizer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
return proto.GetBoolExtension(message.Options, E_Protosizer, proto.GetBoolExtension(file.Options, E_ProtosizerAll, false))
}
func IsGoEnumStringer(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool { func IsGoEnumStringer(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool {
return proto.GetBoolExtension(enum.Options, E_GoprotoEnumStringer, proto.GetBoolExtension(file.Options, E_GoprotoEnumStringerAll, true)) return proto.GetBoolExtension(enum.Options, E_GoprotoEnumStringer, proto.GetBoolExtension(file.Options, E_GoprotoEnumStringerAll, true))
} }

View File

@@ -80,7 +80,8 @@ var overwriters []map[string]gogoproto.EnableFunc = []map[string]gogoproto.Enabl
"verboseequal": gogoproto.HasVerboseEqual, "verboseequal": gogoproto.HasVerboseEqual,
}, },
{ {
"size": gogoproto.IsSizer, "size": gogoproto.IsSizer,
"protosizer": gogoproto.IsProtoSizer,
}, },
{ {
"unmarshaler": gogoproto.IsUnmarshaler, "unmarshaler": gogoproto.IsUnmarshaler,

View File

@@ -303,12 +303,12 @@ func wireToType(wire string) int {
panic("unreachable") panic("unreachable")
} }
func (p *marshalto) mapField(numGen NumGen, fieldTyp descriptor.FieldDescriptorProto_Type, varName string) { func (p *marshalto) mapField(numGen NumGen, fieldTyp descriptor.FieldDescriptorProto_Type, varName string, protoSizer bool) {
switch fieldTyp { switch fieldTyp {
case descriptor.FieldDescriptorProto_TYPE_DOUBLE: case descriptor.FieldDescriptorProto_TYPE_DOUBLE:
p.callFixed64(p.mathPkg.Use(), `.Float64bits(`, varName, `)`) p.callFixed64(p.mathPkg.Use(), `.Float64bits(float64(`, varName, `))`)
case descriptor.FieldDescriptorProto_TYPE_FLOAT: case descriptor.FieldDescriptorProto_TYPE_FLOAT:
p.callFixed32(p.mathPkg.Use(), `.Float32bits(`, varName, `)`) p.callFixed32(p.mathPkg.Use(), `.Float32bits(float32(`, varName, `))`)
case descriptor.FieldDescriptorProto_TYPE_INT64, case descriptor.FieldDescriptorProto_TYPE_INT64,
descriptor.FieldDescriptorProto_TYPE_UINT64, descriptor.FieldDescriptorProto_TYPE_UINT64,
descriptor.FieldDescriptorProto_TYPE_INT32, descriptor.FieldDescriptorProto_TYPE_INT32,
@@ -341,7 +341,11 @@ func (p *marshalto) mapField(numGen NumGen, fieldTyp descriptor.FieldDescriptorP
case descriptor.FieldDescriptorProto_TYPE_SINT64: case descriptor.FieldDescriptorProto_TYPE_SINT64:
p.callVarint(`(uint64(`, varName, `) << 1) ^ uint64((`, varName, ` >> 63))`) p.callVarint(`(uint64(`, varName, `) << 1) ^ uint64((`, varName, ` >> 63))`)
case descriptor.FieldDescriptorProto_TYPE_MESSAGE: case descriptor.FieldDescriptorProto_TYPE_MESSAGE:
p.callVarint(varName, `.Size()`) if protoSizer {
p.callVarint(varName, `.ProtoSize()`)
} else {
p.callVarint(varName, `.Size()`)
}
p.P(`n`, numGen.Next(), `, err := `, varName, `.MarshalTo(data[i:])`) p.P(`n`, numGen.Next(), `, err := `, varName, `.MarshalTo(data[i:])`)
p.P(`if err != nil {`) p.P(`if err != nil {`)
p.In() p.In()
@@ -371,6 +375,8 @@ func (p *marshalto) generateField(proto3 bool, numGen NumGen, file *generator.Fi
nullable := gogoproto.IsNullable(field) nullable := gogoproto.IsNullable(field)
repeated := field.IsRepeated() repeated := field.IsRepeated()
required := field.IsRequired() required := field.IsRequired()
protoSizer := gogoproto.IsProtoSizer(file.FileDescriptorProto, message.DescriptorProto)
if required && nullable { if required && nullable {
p.P(`if m.`, fieldname, `== nil {`) p.P(`if m.`, fieldname, `== nil {`)
p.In() p.In()
@@ -397,13 +403,13 @@ func (p *marshalto) generateField(proto3 bool, numGen NumGen, file *generator.Fi
} }
switch *field.Type { switch *field.Type {
case descriptor.FieldDescriptorProto_TYPE_DOUBLE: case descriptor.FieldDescriptorProto_TYPE_DOUBLE:
if !p.unsafe { if !p.unsafe || gogoproto.IsCastType(field) {
if packed { if packed {
p.encodeKey(fieldNumber, wireType) p.encodeKey(fieldNumber, wireType)
p.callVarint(`len(m.`, fieldname, `) * 8`) p.callVarint(`len(m.`, fieldname, `) * 8`)
p.P(`for _, num := range m.`, fieldname, ` {`) p.P(`for _, num := range m.`, fieldname, ` {`)
p.In() p.In()
p.P(`f`, numGen.Next(), ` := `, p.mathPkg.Use(), `.Float64bits(num)`) p.P(`f`, numGen.Next(), ` := `, p.mathPkg.Use(), `.Float64bits(float64(num))`)
p.encodeFixed64("f" + numGen.Current()) p.encodeFixed64("f" + numGen.Current())
p.Out() p.Out()
p.P(`}`) p.P(`}`)
@@ -411,7 +417,7 @@ func (p *marshalto) generateField(proto3 bool, numGen NumGen, file *generator.Fi
p.P(`for _, num := range m.`, fieldname, ` {`) p.P(`for _, num := range m.`, fieldname, ` {`)
p.In() p.In()
p.encodeKey(fieldNumber, wireType) p.encodeKey(fieldNumber, wireType)
p.P(`f`, numGen.Next(), ` := `, p.mathPkg.Use(), `.Float64bits(num)`) p.P(`f`, numGen.Next(), ` := `, p.mathPkg.Use(), `.Float64bits(float64(num))`)
p.encodeFixed64("f" + numGen.Current()) p.encodeFixed64("f" + numGen.Current())
p.Out() p.Out()
p.P(`}`) p.P(`}`)
@@ -419,15 +425,15 @@ func (p *marshalto) generateField(proto3 bool, numGen NumGen, file *generator.Fi
p.P(`if m.`, fieldname, ` != 0 {`) p.P(`if m.`, fieldname, ` != 0 {`)
p.In() p.In()
p.encodeKey(fieldNumber, wireType) p.encodeKey(fieldNumber, wireType)
p.callFixed64(p.mathPkg.Use(), `.Float64bits(m.`+fieldname, `)`) p.callFixed64(p.mathPkg.Use(), `.Float64bits(float64(m.`+fieldname, `))`)
p.Out() p.Out()
p.P(`}`) p.P(`}`)
} else if !nullable { } else if !nullable {
p.encodeKey(fieldNumber, wireType) p.encodeKey(fieldNumber, wireType)
p.callFixed64(p.mathPkg.Use(), `.Float64bits(m.`+fieldname, `)`) p.callFixed64(p.mathPkg.Use(), `.Float64bits(float64(m.`+fieldname, `))`)
} else { } else {
p.encodeKey(fieldNumber, wireType) p.encodeKey(fieldNumber, wireType)
p.callFixed64(p.mathPkg.Use(), `.Float64bits(*m.`+fieldname, `)`) p.callFixed64(p.mathPkg.Use(), `.Float64bits(float64(*m.`+fieldname, `))`)
} }
} else { } else {
if packed { if packed {
@@ -461,13 +467,13 @@ func (p *marshalto) generateField(proto3 bool, numGen NumGen, file *generator.Fi
} }
} }
case descriptor.FieldDescriptorProto_TYPE_FLOAT: case descriptor.FieldDescriptorProto_TYPE_FLOAT:
if !p.unsafe { if !p.unsafe || gogoproto.IsCastType(field) {
if packed { if packed {
p.encodeKey(fieldNumber, wireType) p.encodeKey(fieldNumber, wireType)
p.callVarint(`len(m.`, fieldname, `) * 4`) p.callVarint(`len(m.`, fieldname, `) * 4`)
p.P(`for _, num := range m.`, fieldname, ` {`) p.P(`for _, num := range m.`, fieldname, ` {`)
p.In() p.In()
p.P(`f`, numGen.Next(), ` := `, p.mathPkg.Use(), `.Float32bits(num)`) p.P(`f`, numGen.Next(), ` := `, p.mathPkg.Use(), `.Float32bits(float32(num))`)
p.encodeFixed32("f" + numGen.Current()) p.encodeFixed32("f" + numGen.Current())
p.Out() p.Out()
p.P(`}`) p.P(`}`)
@@ -475,7 +481,7 @@ func (p *marshalto) generateField(proto3 bool, numGen NumGen, file *generator.Fi
p.P(`for _, num := range m.`, fieldname, ` {`) p.P(`for _, num := range m.`, fieldname, ` {`)
p.In() p.In()
p.encodeKey(fieldNumber, wireType) p.encodeKey(fieldNumber, wireType)
p.P(`f`, numGen.Next(), ` := `, p.mathPkg.Use(), `.Float32bits(num)`) p.P(`f`, numGen.Next(), ` := `, p.mathPkg.Use(), `.Float32bits(float32(num))`)
p.encodeFixed32("f" + numGen.Current()) p.encodeFixed32("f" + numGen.Current())
p.Out() p.Out()
p.P(`}`) p.P(`}`)
@@ -483,15 +489,15 @@ func (p *marshalto) generateField(proto3 bool, numGen NumGen, file *generator.Fi
p.P(`if m.`, fieldname, ` != 0 {`) p.P(`if m.`, fieldname, ` != 0 {`)
p.In() p.In()
p.encodeKey(fieldNumber, wireType) p.encodeKey(fieldNumber, wireType)
p.callFixed32(p.mathPkg.Use(), `.Float32bits(m.`+fieldname, `)`) p.callFixed32(p.mathPkg.Use(), `.Float32bits(float32(m.`+fieldname, `))`)
p.Out() p.Out()
p.P(`}`) p.P(`}`)
} else if !nullable { } else if !nullable {
p.encodeKey(fieldNumber, wireType) p.encodeKey(fieldNumber, wireType)
p.callFixed32(p.mathPkg.Use(), `.Float32bits(m.`+fieldname, `)`) p.callFixed32(p.mathPkg.Use(), `.Float32bits(float32(m.`+fieldname, `))`)
} else { } else {
p.encodeKey(fieldNumber, wireType) p.encodeKey(fieldNumber, wireType)
p.callFixed32(p.mathPkg.Use(), `.Float32bits(*m.`+fieldname, `)`) p.callFixed32(p.mathPkg.Use(), `.Float32bits(float32(*m.`+fieldname, `))`)
} }
} else { } else {
if packed { if packed {
@@ -896,22 +902,30 @@ func (p *marshalto) generateField(proto3 bool, numGen NumGen, file *generator.Fi
} else if !nullable { } else if !nullable {
accessor = `(&v)` accessor = `(&v)`
} }
p.P(`msgSize := `, accessor, `.Size()`) if protoSizer {
p.P(`msgSize := `, accessor, `.ProtoSize()`)
} else {
p.P(`msgSize := `, accessor, `.Size()`)
}
sum = append(sum, `msgSize + sov`+p.localName+`(uint64(msgSize))`) sum = append(sum, `msgSize + sov`+p.localName+`(uint64(msgSize))`)
} }
p.P(`mapSize := `, strings.Join(sum, " + ")) p.P(`mapSize := `, strings.Join(sum, " + "))
p.callVarint("mapSize") p.callVarint("mapSize")
p.encodeKey(1, wireToType(keywire)) p.encodeKey(1, wireToType(keywire))
p.mapField(numGen, m.KeyField.GetType(), "k") p.mapField(numGen, m.KeyField.GetType(), "k", protoSizer)
p.encodeKey(2, wireToType(valuewire)) p.encodeKey(2, wireToType(valuewire))
p.mapField(numGen, m.ValueField.GetType(), accessor) p.mapField(numGen, m.ValueField.GetType(), accessor, protoSizer)
p.Out() p.Out()
p.P(`}`) p.P(`}`)
} else if repeated { } else if repeated {
p.P(`for _, msg := range m.`, fieldname, ` {`) p.P(`for _, msg := range m.`, fieldname, ` {`)
p.In() p.In()
p.encodeKey(fieldNumber, wireType) p.encodeKey(fieldNumber, wireType)
p.callVarint("msg.Size()") if protoSizer {
p.callVarint("msg.ProtoSize()")
} else {
p.callVarint("msg.Size()")
}
p.P(`n, err := msg.MarshalTo(data[i:])`) p.P(`n, err := msg.MarshalTo(data[i:])`)
p.P(`if err != nil {`) p.P(`if err != nil {`)
p.In() p.In()
@@ -923,7 +937,11 @@ func (p *marshalto) generateField(proto3 bool, numGen NumGen, file *generator.Fi
p.P(`}`) p.P(`}`)
} else { } else {
p.encodeKey(fieldNumber, wireType) p.encodeKey(fieldNumber, wireType)
p.callVarint(`m.`, fieldname, `.Size()`) if protoSizer {
p.callVarint(`m.`, fieldname, `.ProtoSize()`)
} else {
p.callVarint(`m.`, fieldname, `.Size()`)
}
p.P(`n`, numGen.Next(), `, err := m.`, fieldname, `.MarshalTo(data[i:])`) p.P(`n`, numGen.Next(), `, err := m.`, fieldname, `.MarshalTo(data[i:])`)
p.P(`if err != nil {`) p.P(`if err != nil {`)
p.In() p.In()
@@ -960,7 +978,11 @@ func (p *marshalto) generateField(proto3 bool, numGen NumGen, file *generator.Fi
p.P(`for _, msg := range m.`, fieldname, ` {`) p.P(`for _, msg := range m.`, fieldname, ` {`)
p.In() p.In()
p.encodeKey(fieldNumber, wireType) p.encodeKey(fieldNumber, wireType)
p.callVarint(`msg.Size()`) if protoSizer {
p.callVarint(`msg.ProtoSize()`)
} else {
p.callVarint(`msg.Size()`)
}
p.P(`n, err := msg.MarshalTo(data[i:])`) p.P(`n, err := msg.MarshalTo(data[i:])`)
p.P(`if err != nil {`) p.P(`if err != nil {`)
p.In() p.In()
@@ -972,7 +994,11 @@ func (p *marshalto) generateField(proto3 bool, numGen NumGen, file *generator.Fi
p.P(`}`) p.P(`}`)
} else { } else {
p.encodeKey(fieldNumber, wireType) p.encodeKey(fieldNumber, wireType)
p.callVarint(`m.`, fieldname, `.Size()`) if protoSizer {
p.callVarint(`m.`, fieldname, `.ProtoSize()`)
} else {
p.callVarint(`m.`, fieldname, `.Size()`)
}
p.P(`n`, numGen.Next(), `, err := m.`, fieldname, `.MarshalTo(data[i:])`) p.P(`n`, numGen.Next(), `, err := m.`, fieldname, `.MarshalTo(data[i:])`)
p.P(`if err != nil {`) p.P(`if err != nil {`)
p.In() p.In()
@@ -1126,7 +1152,11 @@ func (p *marshalto) Generate(file *generator.FileDescriptor) {
p.P(`func (m *`, ccTypeName, `) Marshal() (data []byte, err error) {`) p.P(`func (m *`, ccTypeName, `) Marshal() (data []byte, err error) {`)
p.In() p.In()
p.P(`size := m.Size()`) if gogoproto.IsProtoSizer(file.FileDescriptorProto, message.DescriptorProto) {
p.P(`size := m.ProtoSize()`)
} else {
p.P(`size := m.Size()`)
}
p.P(`data = make([]byte, size)`) p.P(`data = make([]byte, size)`)
p.P(`n, err := m.MarshalTo(data)`) p.P(`n, err := m.MarshalTo(data)`)
p.P(`if err != nil {`) p.P(`if err != nil {`)

View File

@@ -290,7 +290,7 @@ func (p *plugin) GenerateField(file *generator.FileDescriptor, message *generato
} else if field.IsMessage() || p.IsGroup(field) { } else if field.IsMessage() || p.IsGroup(field) {
funcCall := getFuncCall(goTypName) funcCall := getFuncCall(goTypName)
if field.IsRepeated() { if field.IsRepeated() {
p.P(p.varGen.Next(), ` := r.Intn(10)`) p.P(p.varGen.Next(), ` := r.Intn(5)`)
p.P(`this.`, fieldname, ` = make(`, goTyp, `, `, p.varGen.Current(), `)`) p.P(`this.`, fieldname, ` = make(`, goTyp, `, `, p.varGen.Current(), `)`)
p.P(`for i := 0; i < `, p.varGen.Current(), `; i++ {`) p.P(`for i := 0; i < `, p.varGen.Current(), `; i++ {`)
p.In() p.In()
@@ -346,7 +346,7 @@ func (p *plugin) GenerateField(file *generator.FileDescriptor, message *generato
} }
} else if field.IsBytes() { } else if field.IsBytes() {
if field.IsRepeated() { if field.IsRepeated() {
p.P(p.varGen.Next(), ` := r.Intn(100)`) p.P(p.varGen.Next(), ` := r.Intn(10)`)
p.P(`this.`, fieldname, ` = make(`, goTyp, `, `, p.varGen.Current(), `)`) p.P(`this.`, fieldname, ` = make(`, goTyp, `, `, p.varGen.Current(), `)`)
p.P(`for i := 0; i < `, p.varGen.Current(), `; i++ {`) p.P(`for i := 0; i < `, p.varGen.Current(), `; i++ {`)
p.In() p.In()
@@ -387,7 +387,7 @@ func (p *plugin) GenerateField(file *generator.FileDescriptor, message *generato
} else { } else {
typName := generator.GoTypeToName(goTyp) typName := generator.GoTypeToName(goTyp)
if field.IsRepeated() { if field.IsRepeated() {
p.P(p.varGen.Next(), ` := r.Intn(100)`) p.P(p.varGen.Next(), ` := r.Intn(10)`)
p.P(`this.`, fieldname, ` = make(`, goTyp, `, `, p.varGen.Current(), `)`) p.P(`this.`, fieldname, ` = make(`, goTyp, `, `, p.varGen.Current(), `)`)
p.P(`for i := 0; i < `, p.varGen.Current(), `; i++ {`) p.P(`for i := 0; i < `, p.varGen.Current(), `; i++ {`)
p.In() p.In()

View File

@@ -25,7 +25,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/* /*
The size plugin generates a Size method for each message. The size plugin generates a Size or ProtoSize method for each message.
This is useful with the MarshalTo method generated by the marshalto plugin and the This is useful with the MarshalTo method generated by the marshalto plugin and the
gogoproto.marshaler and gogoproto.marshaler_all extensions. gogoproto.marshaler and gogoproto.marshaler_all extensions.
@@ -33,6 +33,8 @@ It is enabled by the following extensions:
- sizer - sizer
- sizer_all - sizer_all
- protosizer
- protosizer_all
The size plugin also generates a test given it is enabled using one of the following extensions: The size plugin also generates a test given it is enabled using one of the following extensions:
@@ -195,7 +197,7 @@ func (p *size) sizeZigZag() {
}`) }`)
} }
func (p *size) generateField(proto3 bool, file *generator.FileDescriptor, message *generator.Descriptor, field *descriptor.FieldDescriptorProto) { func (p *size) generateField(proto3 bool, file *generator.FileDescriptor, message *generator.Descriptor, field *descriptor.FieldDescriptorProto, sizeName string) {
fieldname := p.GetOneOfFieldName(message, field) fieldname := p.GetOneOfFieldName(message, field)
nullable := gogoproto.IsNullable(field) nullable := gogoproto.IsNullable(field)
repeated := field.IsRepeated() repeated := field.IsRepeated()
@@ -393,17 +395,17 @@ func (p *size) generateField(proto3 bool, file *generator.FileDescriptor, messag
p.P(`if v != nil {`) p.P(`if v != nil {`)
p.In() p.In()
if valuegoTyp != valuegoAliasTyp { if valuegoTyp != valuegoAliasTyp {
p.P(`l = ((`, valuegoTyp, `)(v)).Size()`) p.P(`l = ((`, valuegoTyp, `)(v)).`, sizeName, `()`)
} else { } else {
p.P(`l = v.Size()`) p.P(`l = v.`, sizeName, `()`)
} }
p.Out() p.Out()
p.P(`}`) p.P(`}`)
} else { } else {
if valuegoTyp != valuegoAliasTyp { if valuegoTyp != valuegoAliasTyp {
p.P(`l = ((*`, valuegoTyp, `)(&v)).Size()`) p.P(`l = ((*`, valuegoTyp, `)(&v)).`, sizeName, `()`)
} else { } else {
p.P(`l = v.Size()`) p.P(`l = v.`, sizeName, `()`)
} }
} }
sum = append(sum, `l+sov`+p.localName+`(uint64(l))`) sum = append(sum, `l+sov`+p.localName+`(uint64(l))`)
@@ -415,12 +417,12 @@ func (p *size) generateField(proto3 bool, file *generator.FileDescriptor, messag
} else if repeated { } else if repeated {
p.P(`for _, e := range m.`, fieldname, ` { `) p.P(`for _, e := range m.`, fieldname, ` { `)
p.In() p.In()
p.P(`l=e.Size()`) p.P(`l=e.`, sizeName, `()`)
p.P(`n+=`, strconv.Itoa(key), `+l+sov`, p.localName, `(uint64(l))`) p.P(`n+=`, strconv.Itoa(key), `+l+sov`, p.localName, `(uint64(l))`)
p.Out() p.Out()
p.P(`}`) p.P(`}`)
} else { } else {
p.P(`l=m.`, fieldname, `.Size()`) p.P(`l=m.`, fieldname, `.`, sizeName, `()`)
p.P(`n+=`, strconv.Itoa(key), `+l+sov`, p.localName, `(uint64(l))`) p.P(`n+=`, strconv.Itoa(key), `+l+sov`, p.localName, `(uint64(l))`)
} }
case descriptor.FieldDescriptorProto_TYPE_BYTES: case descriptor.FieldDescriptorProto_TYPE_BYTES:
@@ -447,12 +449,12 @@ func (p *size) generateField(proto3 bool, file *generator.FileDescriptor, messag
if repeated { if repeated {
p.P(`for _, e := range m.`, fieldname, ` { `) p.P(`for _, e := range m.`, fieldname, ` { `)
p.In() p.In()
p.P(`l=e.Size()`) p.P(`l=e.`, sizeName, `()`)
p.P(`n+=`, strconv.Itoa(key), `+l+sov`, p.localName, `(uint64(l))`) p.P(`n+=`, strconv.Itoa(key), `+l+sov`, p.localName, `(uint64(l))`)
p.Out() p.Out()
p.P(`}`) p.P(`}`)
} else { } else {
p.P(`l=m.`, fieldname, `.Size()`) p.P(`l=m.`, fieldname, `.`, sizeName, `()`)
p.P(`n+=`, strconv.Itoa(key), `+l+sov`, p.localName, `(uint64(l))`) p.P(`n+=`, strconv.Itoa(key), `+l+sov`, p.localName, `(uint64(l))`)
} }
} }
@@ -501,7 +503,12 @@ func (p *size) Generate(file *generator.FileDescriptor) {
protoPkg = p.NewImport("github.com/golang/protobuf/proto") protoPkg = p.NewImport("github.com/golang/protobuf/proto")
} }
for _, message := range file.Messages() { for _, message := range file.Messages() {
if !gogoproto.IsSizer(file.FileDescriptorProto, message.DescriptorProto) { sizeName := ""
if gogoproto.IsSizer(file.FileDescriptorProto, message.DescriptorProto) {
sizeName = "Size"
} else if gogoproto.IsProtoSizer(file.FileDescriptorProto, message.DescriptorProto) {
sizeName = "ProtoSize"
} else {
continue continue
} }
if message.DescriptorProto.GetOptions().GetMapEntry() { if message.DescriptorProto.GetOptions().GetMapEntry() {
@@ -509,7 +516,7 @@ func (p *size) Generate(file *generator.FileDescriptor) {
} }
p.atleastOne = true p.atleastOne = true
ccTypeName := generator.CamelCaseSlice(message.TypeName()) ccTypeName := generator.CamelCaseSlice(message.TypeName())
p.P(`func (m *`, ccTypeName, `) Size() (n int) {`) p.P(`func (m *`, ccTypeName, `) `, sizeName, `() (n int) {`)
p.In() p.In()
p.P(`var l int`) p.P(`var l int`)
p.P(`_ = l`) p.P(`_ = l`)
@@ -518,7 +525,7 @@ func (p *size) Generate(file *generator.FileDescriptor) {
oneof := field.OneofIndex != nil oneof := field.OneofIndex != nil
if !oneof { if !oneof {
proto3 := gogoproto.IsProto3(file.FileDescriptorProto) proto3 := gogoproto.IsProto3(file.FileDescriptorProto)
p.generateField(proto3, file, message, field) p.generateField(proto3, file, message, field, sizeName)
} else { } else {
fieldname := p.GetFieldName(message, field) fieldname := p.GetFieldName(message, field)
if _, ok := oneofs[fieldname]; ok { if _, ok := oneofs[fieldname]; ok {
@@ -528,7 +535,7 @@ func (p *size) Generate(file *generator.FileDescriptor) {
} }
p.P(`if m.`, fieldname, ` != nil {`) p.P(`if m.`, fieldname, ` != nil {`)
p.In() p.In()
p.P(`n+=m.`, fieldname, `.Size()`) p.P(`n+=m.`, fieldname, `.`, sizeName, `()`)
p.Out() p.Out()
p.P(`}`) p.P(`}`)
} }
@@ -564,12 +571,12 @@ func (p *size) Generate(file *generator.FileDescriptor) {
continue continue
} }
ccTypeName := p.OneOfTypeName(message, f) ccTypeName := p.OneOfTypeName(message, f)
p.P(`func (m *`, ccTypeName, `) Size() (n int) {`) p.P(`func (m *`, ccTypeName, `) `, sizeName, `() (n int) {`)
p.In() p.In()
p.P(`var l int`) p.P(`var l int`)
p.P(`_ = l`) p.P(`_ = l`)
vanity.TurnOffNullableForNativeTypesWithoutDefaultsOnly(f) vanity.TurnOffNullableForNativeTypesWithoutDefaultsOnly(f)
p.generateField(false, file, message, f) p.generateField(false, file, message, f, sizeName)
p.P(`return n`) p.P(`return n`)
p.Out() p.Out()
p.P(`}`) p.P(`}`)

View File

@@ -51,7 +51,12 @@ func (p *test) Generate(imports generator.PluginImports, file *generator.FileDes
} }
for _, message := range file.Messages() { for _, message := range file.Messages() {
ccTypeName := generator.CamelCaseSlice(message.TypeName()) ccTypeName := generator.CamelCaseSlice(message.TypeName())
if !gogoproto.IsSizer(file.FileDescriptorProto, message.DescriptorProto) { sizeName := ""
if gogoproto.IsSizer(file.FileDescriptorProto, message.DescriptorProto) {
sizeName = "Size"
} else if gogoproto.IsProtoSizer(file.FileDescriptorProto, message.DescriptorProto) {
sizeName = "ProtoSize"
} else {
continue continue
} }
if message.DescriptorProto.GetOptions().GetMapEntry() { if message.DescriptorProto.GetOptions().GetMapEntry() {
@@ -60,7 +65,7 @@ func (p *test) Generate(imports generator.PluginImports, file *generator.FileDes
if gogoproto.HasTestGen(file.FileDescriptorProto, message.DescriptorProto) { if gogoproto.HasTestGen(file.FileDescriptorProto, message.DescriptorProto) {
used = true used = true
p.P(`func Test`, ccTypeName, `Size(t *`, testingPkg.Use(), `.T) {`) p.P(`func Test`, ccTypeName, sizeName, `(t *`, testingPkg.Use(), `.T) {`)
p.In() p.In()
p.P(`seed := `, timePkg.Use(), `.Now().UnixNano()`) p.P(`seed := `, timePkg.Use(), `.Now().UnixNano()`)
p.P(`popr := `, randPkg.Use(), `.New(`, randPkg.Use(), `.NewSource(seed))`) p.P(`popr := `, randPkg.Use(), `.New(`, randPkg.Use(), `.NewSource(seed))`)
@@ -72,7 +77,7 @@ func (p *test) Generate(imports generator.PluginImports, file *generator.FileDes
p.P(`t.Fatalf("seed = %d, err = %v", seed, err)`) p.P(`t.Fatalf("seed = %d, err = %v", seed, err)`)
p.Out() p.Out()
p.P(`}`) p.P(`}`)
p.P(`size := p.Size()`) p.P(`size := p.`, sizeName, `()`)
p.P(`if len(data) != size {`) p.P(`if len(data) != size {`)
p.In() p.In()
p.P(`t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(data))`) p.P(`t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(data))`)
@@ -96,7 +101,7 @@ func (p *test) Generate(imports generator.PluginImports, file *generator.FileDes
if gogoproto.HasBenchGen(file.FileDescriptorProto, message.DescriptorProto) { if gogoproto.HasBenchGen(file.FileDescriptorProto, message.DescriptorProto) {
used = true used = true
p.P(`func Benchmark`, ccTypeName, `Size(b *`, testingPkg.Use(), `.B) {`) p.P(`func Benchmark`, ccTypeName, sizeName, `(b *`, testingPkg.Use(), `.B) {`)
p.In() p.In()
p.P(`popr := `, randPkg.Use(), `.New(`, randPkg.Use(), `.NewSource(616))`) p.P(`popr := `, randPkg.Use(), `.New(`, randPkg.Use(), `.NewSource(616))`)
p.P(`total := 0`) p.P(`total := 0`)
@@ -109,7 +114,7 @@ func (p *test) Generate(imports generator.PluginImports, file *generator.FileDes
p.P(`b.ResetTimer()`) p.P(`b.ResetTimer()`)
p.P(`for i := 0; i < b.N; i++ {`) p.P(`for i := 0; i < b.N; i++ {`)
p.In() p.In()
p.P(`total += pops[i%1000].Size()`) p.P(`total += pops[i%1000].`, sizeName, `()`)
p.Out() p.Out()
p.P(`}`) p.P(`}`)
p.P(`b.SetBytes(int64(total / b.N))`) p.P(`b.SetBytes(int64(total / b.N))`)

View File

@@ -341,7 +341,11 @@ func (p *testProto) Generate(imports generator.PluginImports, file *generator.Fi
p.P(`seed := `, timePkg.Use(), `.Now().UnixNano()`) p.P(`seed := `, timePkg.Use(), `.Now().UnixNano()`)
p.P(`popr := `, randPkg.Use(), `.New(`, randPkg.Use(), `.NewSource(seed))`) p.P(`popr := `, randPkg.Use(), `.New(`, randPkg.Use(), `.NewSource(seed))`)
p.P(`p := NewPopulated`, ccTypeName, `(popr, false)`) p.P(`p := NewPopulated`, ccTypeName, `(popr, false)`)
p.P(`size := p.Size()`) if gogoproto.IsProtoSizer(file.FileDescriptorProto, message.DescriptorProto) {
p.P(`size := p.ProtoSize()`)
} else {
p.P(`size := p.Size()`)
}
p.P(`data := make([]byte, size)`) p.P(`data := make([]byte, size)`)
p.P(`for i := range data {`) p.P(`for i := range data {`)
p.In() p.In()

View File

@@ -419,7 +419,7 @@ func (p *unmarshal) noStarOrSliceType(msg *generator.Descriptor, field *descript
return typ return typ
} }
func (p *unmarshal) field(file *descriptor.FileDescriptorProto, msg *generator.Descriptor, field *descriptor.FieldDescriptorProto, fieldname string, proto3 bool) { func (p *unmarshal) field(file *generator.FileDescriptor, msg *generator.Descriptor, field *descriptor.FieldDescriptorProto, fieldname string, proto3 bool) {
repeated := field.IsRepeated() repeated := field.IsRepeated()
nullable := gogoproto.IsNullable(field) nullable := gogoproto.IsNullable(field)
typ := p.noStarOrSliceType(msg, field) typ := p.noStarOrSliceType(msg, field)
@@ -676,7 +676,7 @@ func (p *unmarshal) field(file *descriptor.FileDescriptorProto, msg *generator.D
p.Out() p.Out()
p.P(`}`) p.P(`}`)
p.P(`m.`, fieldname, ` = &`, p.OneOfTypeName(msg, field), `{v}`) p.P(`m.`, fieldname, ` = &`, p.OneOfTypeName(msg, field), `{v}`)
} else if generator.IsMap(file, field) { } else if generator.IsMap(file.FileDescriptorProto, field) {
m := p.GoMapType(nil, field) m := p.GoMapType(nil, field)
keygoTyp, _ := p.GoType(nil, m.KeyField) keygoTyp, _ := p.GoType(nil, m.KeyField)
@@ -773,7 +773,12 @@ func (p *unmarshal) field(file *descriptor.FileDescriptorProto, msg *generator.D
p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, make([]byte, postIndex-iNdEx))`) p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, make([]byte, postIndex-iNdEx))`)
p.P(`copy(m.`, fieldname, `[len(m.`, fieldname, `)-1], data[iNdEx:postIndex])`) p.P(`copy(m.`, fieldname, `[len(m.`, fieldname, `)-1], data[iNdEx:postIndex])`)
} else { } else {
p.P(`m.`, fieldname, ` = append([]byte{}`, `, data[iNdEx:postIndex]...)`) p.P(`m.`, fieldname, ` = append(m.`, fieldname, `[:0] , data[iNdEx:postIndex]...)`)
p.P(`if m.`, fieldname, ` == nil {`)
p.In()
p.P(`m.`, fieldname, ` = []byte{}`)
p.Out()
p.P(`}`)
} }
} else { } else {
_, ctyp, err := generator.GetCustomType(field) _, ctyp, err := generator.GetCustomType(field)
@@ -1061,13 +1066,13 @@ func (p *unmarshal) Generate(file *generator.FileDescriptor) {
p.P(`}`) p.P(`}`)
p.P(`for iNdEx < postIndex {`) p.P(`for iNdEx < postIndex {`)
p.In() p.In()
p.field(file.FileDescriptorProto, message, field, fieldname, false) p.field(file, message, field, fieldname, false)
p.Out() p.Out()
p.P(`}`) p.P(`}`)
p.Out() p.Out()
p.P(`} else if wireType == `, strconv.Itoa(wireType), `{`) p.P(`} else if wireType == `, strconv.Itoa(wireType), `{`)
p.In() p.In()
p.field(file.FileDescriptorProto, message, field, fieldname, false) p.field(file, message, field, fieldname, false)
p.Out() p.Out()
p.P(`} else {`) p.P(`} else {`)
p.In() p.In()
@@ -1080,7 +1085,7 @@ func (p *unmarshal) Generate(file *generator.FileDescriptor) {
p.P(`return ` + fmtPkg.Use() + `.Errorf("proto: wrong wireType = %d for field ` + errFieldname + `", wireType)`) p.P(`return ` + fmtPkg.Use() + `.Errorf("proto: wrong wireType = %d for field ` + errFieldname + `", wireType)`)
p.Out() p.Out()
p.P(`}`) p.P(`}`)
p.field(file.FileDescriptorProto, message, field, fieldname, proto3) p.field(file, message, field, fieldname, proto3)
} }
if field.IsRequired() { if field.IsRequired() {

View File

@@ -601,7 +601,7 @@ func (g *Generator) CommandLineParameters(parameter string) {
if pluginList == "none" { if pluginList == "none" {
pluginList = "" pluginList = ""
} }
gogoPluginNames := []string{"unmarshal", "unsafeunmarshaler", "union", "stringer", "size", "populate", "marshalto", "unsafemarshaler", "gostring", "face", "equal", "enumstringer", "embedcheck", "description", "defaultcheck", "oneofcheck"} gogoPluginNames := []string{"unmarshal", "unsafeunmarshaler", "union", "stringer", "size", "protosizer", "populate", "marshalto", "unsafemarshaler", "gostring", "face", "equal", "enumstringer", "embedcheck", "description", "defaultcheck", "oneofcheck"}
pluginList = strings.Join(append(gogoPluginNames, pluginList), "+") pluginList = strings.Join(append(gogoPluginNames, pluginList), "+")
if pluginList != "" { if pluginList != "" {
// Amend the set of plugins. // Amend the set of plugins.
@@ -1857,11 +1857,11 @@ var methodNames = [...]string{
"ExtensionRangeArray", "ExtensionRangeArray",
"ExtensionMap", "ExtensionMap",
"Descriptor", "Descriptor",
"Size",
"MarshalTo", "MarshalTo",
"Equal", "Equal",
"VerboseEqual", "VerboseEqual",
"GoString", "GoString",
"ProtoSize",
} }
// Generate the type and default constant definitions for this Descriptor. // Generate the type and default constant definitions for this Descriptor.
@@ -1875,6 +1875,9 @@ func (g *Generator) generateMessage(message *Descriptor) {
for _, n := range methodNames { for _, n := range methodNames {
usedNames[n] = true usedNames[n] = true
} }
if !gogoproto.IsProtoSizer(message.file, message.DescriptorProto) {
usedNames["Size"] = true
}
fieldNames := make(map[*descriptor.FieldDescriptorProto]string) fieldNames := make(map[*descriptor.FieldDescriptorProto]string)
fieldGetterNames := make(map[*descriptor.FieldDescriptorProto]string) fieldGetterNames := make(map[*descriptor.FieldDescriptorProto]string)
fieldTypes := make(map[*descriptor.FieldDescriptorProto]string) fieldTypes := make(map[*descriptor.FieldDescriptorProto]string)
@@ -2210,6 +2213,9 @@ func (g *Generator) generateMessage(message *Descriptor) {
if gogoproto.IsSizer(g.file.FileDescriptorProto, message.DescriptorProto) { if gogoproto.IsSizer(g.file.FileDescriptorProto, message.DescriptorProto) {
g.P(`Size() int`) g.P(`Size() int`)
} }
if gogoproto.IsProtoSizer(g.file.FileDescriptorProto, message.DescriptorProto) {
g.P(`ProtoSize() int`)
}
g.Out() g.Out()
g.P("}") g.P("}")
} }

View File

@@ -181,6 +181,11 @@ func (g *Generator) GetFieldName(message *Descriptor, field *descriptor.FieldDes
return fieldname + "_" return fieldname + "_"
} }
} }
if !gogoproto.IsProtoSizer(message.file, message.DescriptorProto) {
if fieldname == "Size" {
return fieldname + "_"
}
}
return fieldname return fieldname
} }
@@ -198,6 +203,11 @@ func (g *Generator) GetOneOfFieldName(message *Descriptor, field *descriptor.Fie
return fieldname + "_" return fieldname + "_"
} }
} }
if !gogoproto.IsProtoSizer(message.file, message.DescriptorProto) {
if fieldname == "Size" {
return fieldname + "_"
}
}
return fieldname return fieldname
} }

View File

@@ -39,5 +39,5 @@ test: install generate-test-pbs
generate-test-pbs: generate-test-pbs:
make install make install
make -C testdata make -C testdata
make -C proto3_proto protoc --go_out=Mtestdata/test.proto=github.com/golang/protobuf/proto/testdata:. proto3_proto/proto3.proto
make make

View File

@@ -29,8 +29,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Protocol buffer deep copy. // Protocol buffer deep copy and merge.
// TODO: MessageSet and RawMessage. // TODO: RawMessage.
package proto package proto
@@ -75,12 +75,13 @@ func Merge(dst, src Message) {
} }
func mergeStruct(out, in reflect.Value) { func mergeStruct(out, in reflect.Value) {
sprop := GetProperties(in.Type())
for i := 0; i < in.NumField(); i++ { for i := 0; i < in.NumField(); i++ {
f := in.Type().Field(i) f := in.Type().Field(i)
if strings.HasPrefix(f.Name, "XXX_") { if strings.HasPrefix(f.Name, "XXX_") {
continue continue
} }
mergeAny(out.Field(i), in.Field(i)) mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i])
} }
if emIn, ok := in.Addr().Interface().(extendableProto); ok { if emIn, ok := in.Addr().Interface().(extendableProto); ok {
@@ -98,7 +99,10 @@ func mergeStruct(out, in reflect.Value) {
} }
} }
func mergeAny(out, in reflect.Value) { // mergeAny performs a merge between two values of the same type.
// viaPtr indicates whether the values were indirected through a pointer (implying proto2).
// prop is set if this is a struct field (it may be nil).
func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) {
if in.Type() == protoMessageType { if in.Type() == protoMessageType {
if !in.IsNil() { if !in.IsNil() {
if out.IsNil() { if out.IsNil() {
@@ -112,7 +116,21 @@ func mergeAny(out, in reflect.Value) {
switch in.Kind() { switch in.Kind() {
case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
reflect.String, reflect.Uint32, reflect.Uint64: reflect.String, reflect.Uint32, reflect.Uint64:
if !viaPtr && isProto3Zero(in) {
return
}
out.Set(in) out.Set(in)
case reflect.Interface:
// Probably a oneof field; copy non-nil values.
if in.IsNil() {
return
}
// Allocate destination if it is not set, or set to a different type.
// Otherwise we will merge as normal.
if out.IsNil() || out.Elem().Type() != in.Elem().Type() {
out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T)
}
mergeAny(out.Elem(), in.Elem(), false, nil)
case reflect.Map: case reflect.Map:
if in.Len() == 0 { if in.Len() == 0 {
return return
@@ -127,7 +145,7 @@ func mergeAny(out, in reflect.Value) {
switch elemKind { switch elemKind {
case reflect.Ptr: case reflect.Ptr:
val = reflect.New(in.Type().Elem().Elem()) val = reflect.New(in.Type().Elem().Elem())
mergeAny(val, in.MapIndex(key)) mergeAny(val, in.MapIndex(key), false, nil)
case reflect.Slice: case reflect.Slice:
val = in.MapIndex(key) val = in.MapIndex(key)
val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
@@ -143,13 +161,21 @@ func mergeAny(out, in reflect.Value) {
if out.IsNil() { if out.IsNil() {
out.Set(reflect.New(in.Elem().Type())) out.Set(reflect.New(in.Elem().Type()))
} }
mergeAny(out.Elem(), in.Elem()) mergeAny(out.Elem(), in.Elem(), true, nil)
case reflect.Slice: case reflect.Slice:
if in.IsNil() { if in.IsNil() {
return return
} }
if in.Type().Elem().Kind() == reflect.Uint8 { if in.Type().Elem().Kind() == reflect.Uint8 {
// []byte is a scalar bytes field, not a repeated field. // []byte is a scalar bytes field, not a repeated field.
// Edge case: if this is in a proto3 message, a zero length
// bytes field is considered the zero value, and should not
// be merged.
if prop != nil && prop.proto3 && in.Len() == 0 {
return
}
// Make a deep copy. // Make a deep copy.
// Append to []byte{} instead of []byte(nil) so that we never end up // Append to []byte{} instead of []byte(nil) so that we never end up
// with a nil result. // with a nil result.
@@ -167,7 +193,7 @@ func mergeAny(out, in reflect.Value) {
default: default:
for i := 0; i < n; i++ { for i := 0; i < n; i++ {
x := reflect.Indirect(reflect.New(in.Type().Elem())) x := reflect.Indirect(reflect.New(in.Type().Elem()))
mergeAny(x, in.Index(i)) mergeAny(x, in.Index(i), false, nil)
out.Set(reflect.Append(out, x)) out.Set(reflect.Append(out, x))
} }
} }
@@ -184,7 +210,7 @@ func mergeExtension(out, in map[int32]Extension) {
eOut := Extension{desc: eIn.desc} eOut := Extension{desc: eIn.desc}
if eIn.value != nil { if eIn.value != nil {
v := reflect.New(reflect.TypeOf(eIn.value)).Elem() v := reflect.New(reflect.TypeOf(eIn.value)).Elem()
mergeAny(v, reflect.ValueOf(eIn.value)) mergeAny(v, reflect.ValueOf(eIn.value), false, nil)
eOut.value = v.Interface() eOut.value = v.Interface()
} }
if eIn.enc != nil { if eIn.enc != nil {

View File

@@ -46,6 +46,10 @@ import (
// errOverflow is returned when an integer is too large to be represented. // errOverflow is returned when an integer is too large to be represented.
var errOverflow = errors.New("proto: integer overflow") var errOverflow = errors.New("proto: integer overflow")
// ErrInternalBadWireType is returned by generated code when an incorrect
// wire type is encountered. It does not get returned to user code.
var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
// The fundamental decoders that interpret bytes on the wire. // The fundamental decoders that interpret bytes on the wire.
// Those that take integer types all return uint64 and are // Those that take integer types all return uint64 and are
// therefore of type valueDecoder. // therefore of type valueDecoder.
@@ -314,6 +318,24 @@ func UnmarshalMerge(buf []byte, pb Message) error {
return NewBuffer(buf).Unmarshal(pb) return NewBuffer(buf).Unmarshal(pb)
} }
// DecodeMessage reads a count-delimited message from the Buffer.
func (p *Buffer) DecodeMessage(pb Message) error {
enc, err := p.DecodeRawBytes(false)
if err != nil {
return err
}
return NewBuffer(enc).Unmarshal(pb)
}
// DecodeGroup reads a tag-delimited group from the Buffer.
func (p *Buffer) DecodeGroup(pb Message) error {
typ, base, err := getbase(pb)
if err != nil {
return err
}
return p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), true, base)
}
// Unmarshal parses the protocol buffer representation in the // Unmarshal parses the protocol buffer representation in the
// Buffer and places the decoded result in pb. If the struct // Buffer and places the decoded result in pb. If the struct
// underlying pb does not match the data in the buffer, the results can be // underlying pb does not match the data in the buffer, the results can be
@@ -377,6 +399,20 @@ func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group
continue continue
} }
} }
// Maybe it's a oneof?
if prop.oneofUnmarshaler != nil {
m := structPointer_Interface(base, st).(Message)
// First return value indicates whether tag is a oneof field.
ok, err = prop.oneofUnmarshaler(m, tag, wire, o)
if err == ErrInternalBadWireType {
// Map the error to something more descriptive.
// Do the formatting here to save generated code space.
err = fmt.Errorf("bad wiretype for oneof field in %T", m)
}
if ok {
continue
}
}
err = o.skipAndSave(st, tag, wire, base, prop.unrecField) err = o.skipAndSave(st, tag, wire, base, prop.unrecField)
continue continue
} }
@@ -518,9 +554,7 @@ func (o *Buffer) dec_string(p *Properties, base structPointer) error {
if err != nil { if err != nil {
return err return err
} }
sp := new(string) *structPointer_String(base, p.field) = &s
*sp = s
*structPointer_String(base, p.field) = sp
return nil return nil
} }
@@ -563,9 +597,13 @@ func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error
return err return err
} }
nb := int(nn) // number of bytes of encoded bools nb := int(nn) // number of bytes of encoded bools
fin := o.index + nb
if fin < o.index {
return errOverflow
}
y := *v y := *v
for i := 0; i < nb; i++ { for o.index < fin {
u, err := p.valDec(o) u, err := p.valDec(o)
if err != nil { if err != nil {
return err return err
@@ -677,7 +715,7 @@ func (o *Buffer) dec_new_map(p *Properties, base structPointer) error {
oi := o.index // index at the end of this map entry oi := o.index // index at the end of this map entry
o.index -= len(raw) // move buffer back to start of map entry o.index -= len(raw) // move buffer back to start of map entry
mptr := structPointer_Map(base, p.field, p.mtype) // *map[K]V mptr := structPointer_NewAt(base, p.field, p.mtype) // *map[K]V
if mptr.Elem().IsNil() { if mptr.Elem().IsNil() {
mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem())) mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem()))
} }
@@ -729,8 +767,14 @@ func (o *Buffer) dec_new_map(p *Properties, base structPointer) error {
return fmt.Errorf("proto: bad map data tag %d", raw[0]) return fmt.Errorf("proto: bad map data tag %d", raw[0])
} }
} }
keyelem, valelem := keyptr.Elem(), valptr.Elem()
if !keyelem.IsValid() || !valelem.IsValid() {
// We did not decode the key or the value in the map entry.
// Either way, it's an invalid map entry.
return fmt.Errorf("proto: bad map data: missing key/val")
}
v.SetMapIndex(keyptr.Elem(), valptr.Elem()) v.SetMapIndex(keyelem, valelem)
return nil return nil
} }

View File

@@ -60,9 +60,9 @@ func (e *RequiredNotSetError) Error() string {
} }
var ( var (
// ErrRepeatedHasNil is the error returned if Marshal is called with // errRepeatedHasNil is the error returned if Marshal is called with
// a struct with a repeated field containing a nil element. // a struct with a repeated field containing a nil element.
ErrRepeatedHasNil = errors.New("proto: repeated field has nil element") errRepeatedHasNil = errors.New("proto: repeated field has nil element")
// ErrNil is the error returned if Marshal is called with nil. // ErrNil is the error returned if Marshal is called with nil.
ErrNil = errors.New("proto: Marshal called with nil") ErrNil = errors.New("proto: Marshal called with nil")
@@ -105,6 +105,11 @@ func (p *Buffer) EncodeVarint(x uint64) error {
return nil return nil
} }
// SizeVarint returns the varint encoding size of an integer.
func SizeVarint(x uint64) int {
return sizeVarint(x)
}
func sizeVarint(x uint64) (n int) { func sizeVarint(x uint64) (n int) {
for { for {
n++ n++
@@ -228,6 +233,20 @@ func Marshal(pb Message) ([]byte, error) {
return p.buf, err return p.buf, err
} }
// EncodeMessage writes the protocol buffer to the Buffer,
// prefixed by a varint-encoded length.
func (p *Buffer) EncodeMessage(pb Message) error {
t, base, err := getbase(pb)
if structPointer_IsNil(base) {
return ErrNil
}
if err == nil {
var state errorState
err = p.enc_len_struct(GetProperties(t.Elem()), base, &state)
}
return err
}
// Marshal takes the protocol buffer // Marshal takes the protocol buffer
// and encodes it into the wire format, writing the result to the // and encodes it into the wire format, writing the result to the
// Buffer. // Buffer.
@@ -318,7 +337,7 @@ func size_bool(p *Properties, base structPointer) int {
func size_proto3_bool(p *Properties, base structPointer) int { func size_proto3_bool(p *Properties, base structPointer) int {
v := *structPointer_BoolVal(base, p.field) v := *structPointer_BoolVal(base, p.field)
if !v { if !v && !p.oneof {
return 0 return 0
} }
return len(p.tagcode) + 1 // each bool takes exactly one byte return len(p.tagcode) + 1 // each bool takes exactly one byte
@@ -361,7 +380,7 @@ func size_int32(p *Properties, base structPointer) (n int) {
func size_proto3_int32(p *Properties, base structPointer) (n int) { func size_proto3_int32(p *Properties, base structPointer) (n int) {
v := structPointer_Word32Val(base, p.field) v := structPointer_Word32Val(base, p.field)
x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range
if x == 0 { if x == 0 && !p.oneof {
return 0 return 0
} }
n += len(p.tagcode) n += len(p.tagcode)
@@ -407,7 +426,7 @@ func size_uint32(p *Properties, base structPointer) (n int) {
func size_proto3_uint32(p *Properties, base structPointer) (n int) { func size_proto3_uint32(p *Properties, base structPointer) (n int) {
v := structPointer_Word32Val(base, p.field) v := structPointer_Word32Val(base, p.field)
x := word32Val_Get(v) x := word32Val_Get(v)
if x == 0 { if x == 0 && !p.oneof {
return 0 return 0
} }
n += len(p.tagcode) n += len(p.tagcode)
@@ -452,7 +471,7 @@ func size_int64(p *Properties, base structPointer) (n int) {
func size_proto3_int64(p *Properties, base structPointer) (n int) { func size_proto3_int64(p *Properties, base structPointer) (n int) {
v := structPointer_Word64Val(base, p.field) v := structPointer_Word64Val(base, p.field)
x := word64Val_Get(v) x := word64Val_Get(v)
if x == 0 { if x == 0 && !p.oneof {
return 0 return 0
} }
n += len(p.tagcode) n += len(p.tagcode)
@@ -495,7 +514,7 @@ func size_string(p *Properties, base structPointer) (n int) {
func size_proto3_string(p *Properties, base structPointer) (n int) { func size_proto3_string(p *Properties, base structPointer) (n int) {
v := *structPointer_StringVal(base, p.field) v := *structPointer_StringVal(base, p.field)
if v == "" { if v == "" && !p.oneof {
return 0 return 0
} }
n += len(p.tagcode) n += len(p.tagcode)
@@ -529,7 +548,7 @@ func (o *Buffer) enc_struct_message(p *Properties, base structPointer) error {
} }
o.buf = append(o.buf, p.tagcode...) o.buf = append(o.buf, p.tagcode...)
o.EncodeRawBytes(data) o.EncodeRawBytes(data)
return nil return state.err
} }
o.buf = append(o.buf, p.tagcode...) o.buf = append(o.buf, p.tagcode...)
@@ -667,7 +686,7 @@ func (o *Buffer) enc_proto3_slice_byte(p *Properties, base structPointer) error
func size_slice_byte(p *Properties, base structPointer) (n int) { func size_slice_byte(p *Properties, base structPointer) (n int) {
s := *structPointer_Bytes(base, p.field) s := *structPointer_Bytes(base, p.field)
if s == nil { if s == nil && !p.oneof {
return 0 return 0
} }
n += len(p.tagcode) n += len(p.tagcode)
@@ -677,7 +696,7 @@ func size_slice_byte(p *Properties, base structPointer) (n int) {
func size_proto3_slice_byte(p *Properties, base structPointer) (n int) { func size_proto3_slice_byte(p *Properties, base structPointer) (n int) {
s := *structPointer_Bytes(base, p.field) s := *structPointer_Bytes(base, p.field)
if len(s) == 0 { if len(s) == 0 && !p.oneof {
return 0 return 0
} }
n += len(p.tagcode) n += len(p.tagcode)
@@ -939,7 +958,7 @@ func (o *Buffer) enc_slice_struct_message(p *Properties, base structPointer) err
for i := 0; i < l; i++ { for i := 0; i < l; i++ {
structp := s.Index(i) structp := s.Index(i)
if structPointer_IsNil(structp) { if structPointer_IsNil(structp) {
return ErrRepeatedHasNil return errRepeatedHasNil
} }
// Can the object marshal itself? // Can the object marshal itself?
@@ -958,7 +977,7 @@ func (o *Buffer) enc_slice_struct_message(p *Properties, base structPointer) err
err := o.enc_len_struct(p.sprop, structp, &state) err := o.enc_len_struct(p.sprop, structp, &state)
if err != nil && !state.shouldContinue(err, nil) { if err != nil && !state.shouldContinue(err, nil) {
if err == ErrNil { if err == ErrNil {
return ErrRepeatedHasNil return errRepeatedHasNil
} }
return err return err
} }
@@ -1001,7 +1020,7 @@ func (o *Buffer) enc_slice_struct_group(p *Properties, base structPointer) error
for i := 0; i < l; i++ { for i := 0; i < l; i++ {
b := s.Index(i) b := s.Index(i)
if structPointer_IsNil(b) { if structPointer_IsNil(b) {
return ErrRepeatedHasNil return errRepeatedHasNil
} }
o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup)) o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup))
@@ -1010,7 +1029,7 @@ func (o *Buffer) enc_slice_struct_group(p *Properties, base structPointer) error
if err != nil && !state.shouldContinue(err, nil) { if err != nil && !state.shouldContinue(err, nil) {
if err == ErrNil { if err == ErrNil {
return ErrRepeatedHasNil return errRepeatedHasNil
} }
return err return err
} }
@@ -1084,7 +1103,7 @@ func (o *Buffer) enc_new_map(p *Properties, base structPointer) error {
repeated MapFieldEntry map_field = N; repeated MapFieldEntry map_field = N;
*/ */
v := structPointer_Map(base, p.field, p.mtype).Elem() // map[K]V v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V
if v.Len() == 0 { if v.Len() == 0 {
return nil return nil
} }
@@ -1101,11 +1120,15 @@ func (o *Buffer) enc_new_map(p *Properties, base structPointer) error {
return nil return nil
} }
keys := v.MapKeys() // Don't sort map keys. It is not required by the spec, and C++ doesn't do it.
sort.Sort(mapKeys(keys)) for _, key := range v.MapKeys() {
for _, key := range keys {
val := v.MapIndex(key) val := v.MapIndex(key)
// The only illegal map entry values are nil message pointers.
if val.Kind() == reflect.Ptr && val.IsNil() {
return errors.New("proto: map has nil element")
}
keycopy.Set(key) keycopy.Set(key)
valcopy.Set(val) valcopy.Set(val)
@@ -1118,7 +1141,7 @@ func (o *Buffer) enc_new_map(p *Properties, base structPointer) error {
} }
func size_new_map(p *Properties, base structPointer) int { func size_new_map(p *Properties, base structPointer) int {
v := structPointer_Map(base, p.field, p.mtype).Elem() // map[K]V v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V
keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype) keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype)
@@ -1128,10 +1151,12 @@ func size_new_map(p *Properties, base structPointer) int {
keycopy.Set(key) keycopy.Set(key)
valcopy.Set(val) valcopy.Set(val)
// Tag codes are two bytes per map entry. // Tag codes for key and val are the responsibility of the sub-sizer.
n += 2 keysize := p.mkeyprop.size(p.mkeyprop, keybase)
n += p.mkeyprop.size(p.mkeyprop, keybase) valsize := p.mvalprop.size(p.mvalprop, valbase)
n += p.mvalprop.size(p.mvalprop, valbase) entry := keysize + valsize
// Add on tag code and length of map entry itself.
n += len(p.tagcode) + sizeVarint(uint64(entry)) + entry
} }
return n return n
} }
@@ -1184,6 +1209,9 @@ func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error {
if p.Required && state.err == nil { if p.Required && state.err == nil {
state.err = &RequiredNotSetError{p.Name} state.err = &RequiredNotSetError{p.Name}
} }
} else if err == errRepeatedHasNil {
// Give more context to nil values in repeated fields.
return errors.New("repeated field " + p.OrigName + " has nil element")
} else if !state.shouldContinue(err, p) { } else if !state.shouldContinue(err, p) {
return err return err
} }
@@ -1191,6 +1219,14 @@ func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error {
} }
} }
// Do oneof fields.
if prop.oneofMarshaler != nil {
m := structPointer_Interface(base, prop.stype).(Message)
if err := prop.oneofMarshaler(m, o); err != nil {
return err
}
}
// Add unrecognized fields at the end. // Add unrecognized fields at the end.
if prop.unrecField.IsValid() { if prop.unrecField.IsValid() {
v := *structPointer_Bytes(base, prop.unrecField) v := *structPointer_Bytes(base, prop.unrecField)
@@ -1216,6 +1252,12 @@ func size_struct(prop *StructProperties, base structPointer) (n int) {
n += len(v) n += len(v)
} }
// Factor in any oneof fields.
if prop.oneofSizer != nil {
m := structPointer_Interface(base, prop.stype).(Message)
n += prop.oneofSizer(m)
}
return return
} }

View File

@@ -30,7 +30,6 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Protocol buffer comparison. // Protocol buffer comparison.
// TODO: MessageSet.
package proto package proto
@@ -51,7 +50,9 @@ Equality is defined in this way:
are equal, and extensions sets are equal. are equal, and extensions sets are equal.
- Two set scalar fields are equal iff their values are equal. - Two set scalar fields are equal iff their values are equal.
If the fields are of a floating-point type, remember that If the fields are of a floating-point type, remember that
NaN != x for all x, including NaN. NaN != x for all x, including NaN. If the message is defined
in a proto3 .proto file, fields are not "set"; specifically,
zero length proto3 "bytes" fields are equal (nil == {}).
- Two repeated fields are equal iff their lengths are the same, - Two repeated fields are equal iff their lengths are the same,
and their corresponding elements are equal (a "bytes" field, and their corresponding elements are equal (a "bytes" field,
although represented by []byte, is not a repeated field) although represented by []byte, is not a repeated field)
@@ -89,6 +90,7 @@ func Equal(a, b Message) bool {
// v1 and v2 are known to have the same type. // v1 and v2 are known to have the same type.
func equalStruct(v1, v2 reflect.Value) bool { func equalStruct(v1, v2 reflect.Value) bool {
sprop := GetProperties(v1.Type())
for i := 0; i < v1.NumField(); i++ { for i := 0; i < v1.NumField(); i++ {
f := v1.Type().Field(i) f := v1.Type().Field(i)
if strings.HasPrefix(f.Name, "XXX_") { if strings.HasPrefix(f.Name, "XXX_") {
@@ -114,7 +116,7 @@ func equalStruct(v1, v2 reflect.Value) bool {
} }
f1, f2 = f1.Elem(), f2.Elem() f1, f2 = f1.Elem(), f2.Elem()
} }
if !equalAny(f1, f2) { if !equalAny(f1, f2, sprop.Prop[i]) {
return false return false
} }
} }
@@ -141,7 +143,8 @@ func equalStruct(v1, v2 reflect.Value) bool {
} }
// v1 and v2 are known to have the same type. // v1 and v2 are known to have the same type.
func equalAny(v1, v2 reflect.Value) bool { // prop may be nil.
func equalAny(v1, v2 reflect.Value, prop *Properties) bool {
if v1.Type() == protoMessageType { if v1.Type() == protoMessageType {
m1, _ := v1.Interface().(Message) m1, _ := v1.Interface().(Message)
m2, _ := v2.Interface().(Message) m2, _ := v2.Interface().(Message)
@@ -154,6 +157,17 @@ func equalAny(v1, v2 reflect.Value) bool {
return v1.Float() == v2.Float() return v1.Float() == v2.Float()
case reflect.Int32, reflect.Int64: case reflect.Int32, reflect.Int64:
return v1.Int() == v2.Int() return v1.Int() == v2.Int()
case reflect.Interface:
// Probably a oneof field; compare the inner values.
n1, n2 := v1.IsNil(), v2.IsNil()
if n1 || n2 {
return n1 == n2
}
e1, e2 := v1.Elem(), v2.Elem()
if e1.Type() != e2.Type() {
return false
}
return equalAny(e1, e2, nil)
case reflect.Map: case reflect.Map:
if v1.Len() != v2.Len() { if v1.Len() != v2.Len() {
return false return false
@@ -164,16 +178,22 @@ func equalAny(v1, v2 reflect.Value) bool {
// This key was not found in the second map. // This key was not found in the second map.
return false return false
} }
if !equalAny(v1.MapIndex(key), val2) { if !equalAny(v1.MapIndex(key), val2, nil) {
return false return false
} }
} }
return true return true
case reflect.Ptr: case reflect.Ptr:
return equalAny(v1.Elem(), v2.Elem()) return equalAny(v1.Elem(), v2.Elem(), prop)
case reflect.Slice: case reflect.Slice:
if v1.Type().Elem().Kind() == reflect.Uint8 { if v1.Type().Elem().Kind() == reflect.Uint8 {
// short circuit: []byte // short circuit: []byte
// Edge case: if this is in a proto3 message, a zero length
// bytes field is considered the zero value.
if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 {
return true
}
if v1.IsNil() != v2.IsNil() { if v1.IsNil() != v2.IsNil() {
return false return false
} }
@@ -184,7 +204,7 @@ func equalAny(v1, v2 reflect.Value) bool {
return false return false
} }
for i := 0; i < v1.Len(); i++ { for i := 0; i < v1.Len(); i++ {
if !equalAny(v1.Index(i), v2.Index(i)) { if !equalAny(v1.Index(i), v2.Index(i), prop) {
return false return false
} }
} }
@@ -219,7 +239,7 @@ func equalExtensions(base reflect.Type, em1, em2 map[int32]Extension) bool {
if m1 != nil && m2 != nil { if m1 != nil && m2 != nil {
// Both are unencoded. // Both are unencoded.
if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2)) { if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
return false return false
} }
continue continue
@@ -247,7 +267,7 @@ func equalExtensions(base reflect.Type, em1, em2 map[int32]Extension) bool {
log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err) log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err)
return false return false
} }
if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2)) { if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
return false return false
} }
} }

View File

@@ -37,6 +37,7 @@ package proto
import ( import (
"errors" "errors"
"fmt"
"reflect" "reflect"
"strconv" "strconv"
"sync" "sync"
@@ -221,7 +222,7 @@ func ClearExtension(pb extendableProto, extension *ExtensionDesc) {
} }
// GetExtension parses and returns the given extension of pb. // GetExtension parses and returns the given extension of pb.
// If the extension is not present it returns ErrMissingExtension. // If the extension is not present and has no default value it returns ErrMissingExtension.
func GetExtension(pb extendableProto, extension *ExtensionDesc) (interface{}, error) { func GetExtension(pb extendableProto, extension *ExtensionDesc) (interface{}, error) {
if err := checkExtensionTypes(pb, extension); err != nil { if err := checkExtensionTypes(pb, extension); err != nil {
return nil, err return nil, err
@@ -230,8 +231,11 @@ func GetExtension(pb extendableProto, extension *ExtensionDesc) (interface{}, er
emap := pb.ExtensionMap() emap := pb.ExtensionMap()
e, ok := emap[extension.Field] e, ok := emap[extension.Field]
if !ok { if !ok {
return nil, ErrMissingExtension // defaultExtensionValue returns the default value or
// ErrMissingExtension if there is no default.
return defaultExtensionValue(extension)
} }
if e.value != nil { if e.value != nil {
// Already decoded. Check the descriptor, though. // Already decoded. Check the descriptor, though.
if e.desc != extension { if e.desc != extension {
@@ -257,12 +261,46 @@ func GetExtension(pb extendableProto, extension *ExtensionDesc) (interface{}, er
return e.value, nil return e.value, nil
} }
// defaultExtensionValue returns the default value for extension.
// If no default for an extension is defined ErrMissingExtension is returned.
func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) {
t := reflect.TypeOf(extension.ExtensionType)
props := extensionProperties(extension)
sf, _, err := fieldDefault(t, props)
if err != nil {
return nil, err
}
if sf == nil || sf.value == nil {
// There is no default value.
return nil, ErrMissingExtension
}
if t.Kind() != reflect.Ptr {
// We do not need to return a Ptr, we can directly return sf.value.
return sf.value, nil
}
// We need to return an interface{} that is a pointer to sf.value.
value := reflect.New(t).Elem()
value.Set(reflect.New(value.Type().Elem()))
if sf.kind == reflect.Int32 {
// We may have an int32 or an enum, but the underlying data is int32.
// Since we can't set an int32 into a non int32 reflect.value directly
// set it as a int32.
value.Elem().SetInt(int64(sf.value.(int32)))
} else {
value.Elem().Set(reflect.ValueOf(sf.value))
}
return value.Interface(), nil
}
// decodeExtension decodes an extension encoded in b. // decodeExtension decodes an extension encoded in b.
func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
o := NewBuffer(b) o := NewBuffer(b)
t := reflect.TypeOf(extension.ExtensionType) t := reflect.TypeOf(extension.ExtensionType)
rep := extension.repeated()
props := extensionProperties(extension) props := extensionProperties(extension)
@@ -284,7 +322,7 @@ func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
return nil, err return nil, err
} }
if !rep || o.index >= len(o.buf) { if o.index >= len(o.buf) {
break break
} }
} }
@@ -321,6 +359,14 @@ func SetExtension(pb extendableProto, extension *ExtensionDesc, value interface{
if typ != reflect.TypeOf(value) { if typ != reflect.TypeOf(value) {
return errors.New("proto: bad extension value type") return errors.New("proto: bad extension value type")
} }
// nil extension values need to be caught early, because the
// encoder can't distinguish an ErrNil due to a nil extension
// from an ErrNil due to a missing field. Extensions are
// always optional, so the encoder would just swallow the error
// and drop all the extensions from the encoded message.
if reflect.ValueOf(value).IsNil() {
return fmt.Errorf("proto: SetExtension called with nil value of type %T", value)
}
pb.ExtensionMap()[extension.Field] = Extension{desc: extension, value: value} pb.ExtensionMap()[extension.Field] = Extension{desc: extension, value: value}
return nil return nil

View File

@@ -30,171 +30,237 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/* /*
Package proto converts data structures to and from the wire format of Package proto converts data structures to and from the wire format of
protocol buffers. It works in concert with the Go source code generated protocol buffers. It works in concert with the Go source code generated
for .proto files by the protocol compiler. for .proto files by the protocol compiler.
A summary of the properties of the protocol buffer interface A summary of the properties of the protocol buffer interface
for a protocol buffer variable v: for a protocol buffer variable v:
- Names are turned from camel_case to CamelCase for export. - Names are turned from camel_case to CamelCase for export.
- There are no methods on v to set fields; just treat - There are no methods on v to set fields; just treat
them as structure fields. them as structure fields.
- There are getters that return a field's value if set, - There are getters that return a field's value if set,
and return the field's default value if unset. and return the field's default value if unset.
The getters work even if the receiver is a nil message. The getters work even if the receiver is a nil message.
- The zero value for a struct is its correct initialization state. - The zero value for a struct is its correct initialization state.
All desired fields must be set before marshaling. All desired fields must be set before marshaling.
- A Reset() method will restore a protobuf struct to its zero state. - A Reset() method will restore a protobuf struct to its zero state.
- Non-repeated fields are pointers to the values; nil means unset. - Non-repeated fields are pointers to the values; nil means unset.
That is, optional or required field int32 f becomes F *int32. That is, optional or required field int32 f becomes F *int32.
- Repeated fields are slices. - Repeated fields are slices.
- Helper functions are available to aid the setting of fields. - Helper functions are available to aid the setting of fields.
Helpers for getting values are superseded by the msg.Foo = proto.String("hello") // set field
GetFoo methods and their use is deprecated. - Constants are defined to hold the default values of all fields that
msg.Foo = proto.String("hello") // set field have them. They have the form Default_StructName_FieldName.
- Constants are defined to hold the default values of all fields that Because the getter methods handle defaulted values,
have them. They have the form Default_StructName_FieldName. direct use of these constants should be rare.
Because the getter methods handle defaulted values, - Enums are given type names and maps from names to values.
direct use of these constants should be rare. Enum values are prefixed by the enclosing message's name, or by the
- Enums are given type names and maps from names to values. enum's type name if it is a top-level enum. Enum types have a String
Enum values are prefixed with the enum's type name. Enum types have method, and a Enum method to assist in message construction.
a String method, and a Enum method to assist in message construction. - Nested messages, groups and enums have type names prefixed with the name of
- Nested groups and enums have type names prefixed with the name of the surrounding message type.
the surrounding message type. - Extensions are given descriptor names that start with E_,
- Extensions are given descriptor names that start with E_, followed by an underscore-delimited list of the nested messages
followed by an underscore-delimited list of the nested messages that contain it (if any) followed by the CamelCased name of the
that contain it (if any) followed by the CamelCased name of the extension field itself. HasExtension, ClearExtension, GetExtension
extension field itself. HasExtension, ClearExtension, GetExtension and SetExtension are functions for manipulating extensions.
and SetExtension are functions for manipulating extensions. - Oneof field sets are given a single field in their message,
- Marshal and Unmarshal are functions to encode and decode the wire format. with distinguished wrapper types for each possible field value.
- Marshal and Unmarshal are functions to encode and decode the wire format.
The simplest way to describe this is to see an example. When the .proto file specifies `syntax="proto3"`, there are some differences:
Given file test.proto, containing
package example; - Non-repeated fields of non-message type are values instead of pointers.
- Getters are only generated for message and oneof fields.
- Enum types do not get an Enum method.
enum FOO { X = 17; }; The simplest way to describe this is to see an example.
Given file test.proto, containing
message Test { package example;
required string label = 1;
optional int32 type = 2 [default=77]; enum FOO { X = 17; }
repeated int64 reps = 3;
optional group OptionalGroup = 4 { message Test {
required string RequiredField = 5; required string label = 1;
} optional int32 type = 2 [default=77];
repeated int64 reps = 3;
optional group OptionalGroup = 4 {
required string RequiredField = 5;
}
oneof union {
int32 number = 6;
string name = 7;
}
}
The resulting file, test.pb.go, is:
package example
import proto "github.com/golang/protobuf/proto"
import math "math"
type FOO int32
const (
FOO_X FOO = 17
)
var FOO_name = map[int32]string{
17: "X",
}
var FOO_value = map[string]int32{
"X": 17,
}
func (x FOO) Enum() *FOO {
p := new(FOO)
*p = x
return p
}
func (x FOO) String() string {
return proto.EnumName(FOO_name, int32(x))
}
func (x *FOO) UnmarshalJSON(data []byte) error {
value, err := proto.UnmarshalJSONEnum(FOO_value, data)
if err != nil {
return err
} }
*x = FOO(value)
return nil
}
The resulting file, test.pb.go, is: type Test struct {
Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"`
Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"`
Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"`
Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"`
// Types that are valid to be assigned to Union:
// *Test_Number
// *Test_Name
Union isTest_Union `protobuf_oneof:"union"`
XXX_unrecognized []byte `json:"-"`
}
func (m *Test) Reset() { *m = Test{} }
func (m *Test) String() string { return proto.CompactTextString(m) }
func (*Test) ProtoMessage() {}
package example type isTest_Union interface {
isTest_Union()
}
import "github.com/golang/protobuf/proto" type Test_Number struct {
Number int32 `protobuf:"varint,6,opt,name=number"`
}
type Test_Name struct {
Name string `protobuf:"bytes,7,opt,name=name"`
}
type FOO int32 func (*Test_Number) isTest_Union() {}
const ( func (*Test_Name) isTest_Union() {}
FOO_X FOO = 17
) func (m *Test) GetUnion() isTest_Union {
var FOO_name = map[int32]string{ if m != nil {
17: "X", return m.Union
} }
var FOO_value = map[string]int32{ return nil
"X": 17, }
const Default_Test_Type int32 = 77
func (m *Test) GetLabel() string {
if m != nil && m.Label != nil {
return *m.Label
} }
return ""
}
func (x FOO) Enum() *FOO { func (m *Test) GetType() int32 {
p := new(FOO) if m != nil && m.Type != nil {
*p = x return *m.Type
return p
} }
func (x FOO) String() string { return Default_Test_Type
return proto.EnumName(FOO_name, int32(x)) }
func (m *Test) GetOptionalgroup() *Test_OptionalGroup {
if m != nil {
return m.Optionalgroup
} }
return nil
}
type Test struct { type Test_OptionalGroup struct {
Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"` RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"`
Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"` }
Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"` func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} }
Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) }
XXX_unrecognized []byte `json:"-"`
func (m *Test_OptionalGroup) GetRequiredField() string {
if m != nil && m.RequiredField != nil {
return *m.RequiredField
} }
func (this *Test) Reset() { *this = Test{} } return ""
func (this *Test) String() string { return proto.CompactTextString(this) } }
const Default_Test_Type int32 = 77
func (this *Test) GetLabel() string { func (m *Test) GetNumber() int32 {
if this != nil && this.Label != nil { if x, ok := m.GetUnion().(*Test_Number); ok {
return *this.Label return x.Number
}
return ""
} }
return 0
}
func (this *Test) GetType() int32 { func (m *Test) GetName() string {
if this != nil && this.Type != nil { if x, ok := m.GetUnion().(*Test_Name); ok {
return *this.Type return x.Name
}
return Default_Test_Type
} }
return ""
}
func (this *Test) GetOptionalgroup() *Test_OptionalGroup { func init() {
if this != nil { proto.RegisterEnum("example.FOO", FOO_name, FOO_value)
return this.Optionalgroup }
}
return nil To create and play with a Test object:
package main
import (
"log"
"github.com/golang/protobuf/proto"
pb "./example.pb"
)
func main() {
test := &pb.Test{
Label: proto.String("hello"),
Type: proto.Int32(17),
Reps: []int64{1, 2, 3},
Optionalgroup: &pb.Test_OptionalGroup{
RequiredField: proto.String("good bye"),
},
Union: &pb.Test_Name{"fred"},
} }
data, err := proto.Marshal(test)
type Test_OptionalGroup struct { if err != nil {
RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"` log.Fatal("marshaling error: ", err)
XXX_unrecognized []byte `json:"-"`
} }
func (this *Test_OptionalGroup) Reset() { *this = Test_OptionalGroup{} } newTest := &pb.Test{}
func (this *Test_OptionalGroup) String() string { return proto.CompactTextString(this) } err = proto.Unmarshal(data, newTest)
if err != nil {
func (this *Test_OptionalGroup) GetRequiredField() string { log.Fatal("unmarshaling error: ", err)
if this != nil && this.RequiredField != nil {
return *this.RequiredField
}
return ""
} }
// Now test and newTest contain the same data.
func init() { if test.GetLabel() != newTest.GetLabel() {
proto.RegisterEnum("example.FOO", FOO_name, FOO_value) log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel())
} }
// Use a type switch to determine which oneof was set.
To create and play with a Test object: switch u := test.Union.(type) {
case *pb.Test_Number: // u.Number contains the number.
package main case *pb.Test_Name: // u.Name contains the string.
import (
"log"
"github.com/golang/protobuf/proto"
"./example.pb"
)
func main() {
test := &example.Test{
Label: proto.String("hello"),
Type: proto.Int32(17),
Optionalgroup: &example.Test_OptionalGroup{
RequiredField: proto.String("good bye"),
},
}
data, err := proto.Marshal(test)
if err != nil {
log.Fatal("marshaling error: ", err)
}
newTest := new(example.Test)
err = proto.Unmarshal(data, newTest)
if err != nil {
log.Fatal("unmarshaling error: ", err)
}
// Now test and newTest contain the same data.
if test.GetLabel() != newTest.GetLabel() {
log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel())
}
// etc.
} }
// etc.
}
*/ */
package proto package proto
@@ -203,6 +269,7 @@ import (
"fmt" "fmt"
"log" "log"
"reflect" "reflect"
"sort"
"strconv" "strconv"
"sync" "sync"
) )
@@ -377,13 +444,13 @@ func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32,
// DebugPrint dumps the encoded data in b in a debugging format with a header // DebugPrint dumps the encoded data in b in a debugging format with a header
// including the string s. Used in testing but made available for general debugging. // including the string s. Used in testing but made available for general debugging.
func (o *Buffer) DebugPrint(s string, b []byte) { func (p *Buffer) DebugPrint(s string, b []byte) {
var u uint64 var u uint64
obuf := o.buf obuf := p.buf
index := o.index index := p.index
o.buf = b p.buf = b
o.index = 0 p.index = 0
depth := 0 depth := 0
fmt.Printf("\n--- %s ---\n", s) fmt.Printf("\n--- %s ---\n", s)
@@ -394,12 +461,12 @@ out:
fmt.Print(" ") fmt.Print(" ")
} }
index := o.index index := p.index
if index == len(o.buf) { if index == len(p.buf) {
break break
} }
op, err := o.DecodeVarint() op, err := p.DecodeVarint()
if err != nil { if err != nil {
fmt.Printf("%3d: fetching op err %v\n", index, err) fmt.Printf("%3d: fetching op err %v\n", index, err)
break out break out
@@ -416,7 +483,7 @@ out:
case WireBytes: case WireBytes:
var r []byte var r []byte
r, err = o.DecodeRawBytes(false) r, err = p.DecodeRawBytes(false)
if err != nil { if err != nil {
break out break out
} }
@@ -437,7 +504,7 @@ out:
fmt.Printf("\n") fmt.Printf("\n")
case WireFixed32: case WireFixed32:
u, err = o.DecodeFixed32() u, err = p.DecodeFixed32()
if err != nil { if err != nil {
fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err) fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err)
break out break out
@@ -445,16 +512,15 @@ out:
fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u) fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u)
case WireFixed64: case WireFixed64:
u, err = o.DecodeFixed64() u, err = p.DecodeFixed64()
if err != nil { if err != nil {
fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err) fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err)
break out break out
} }
fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u) fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u)
break
case WireVarint: case WireVarint:
u, err = o.DecodeVarint() u, err = p.DecodeVarint()
if err != nil { if err != nil {
fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err) fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err)
break out break out
@@ -462,30 +528,22 @@ out:
fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u) fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u)
case WireStartGroup: case WireStartGroup:
if err != nil {
fmt.Printf("%3d: t=%3d start err %v\n", index, tag, err)
break out
}
fmt.Printf("%3d: t=%3d start\n", index, tag) fmt.Printf("%3d: t=%3d start\n", index, tag)
depth++ depth++
case WireEndGroup: case WireEndGroup:
depth-- depth--
if err != nil {
fmt.Printf("%3d: t=%3d end err %v\n", index, tag, err)
break out
}
fmt.Printf("%3d: t=%3d end\n", index, tag) fmt.Printf("%3d: t=%3d end\n", index, tag)
} }
} }
if depth != 0 { if depth != 0 {
fmt.Printf("%3d: start-end not balanced %d\n", o.index, depth) fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth)
} }
fmt.Printf("\n") fmt.Printf("\n")
o.buf = obuf p.buf = obuf
o.index = index p.index = index
} }
// SetDefaults sets unset protocol buffer fields to their default values. // SetDefaults sets unset protocol buffer fields to their default values.
@@ -599,13 +657,15 @@ func setDefaults(v reflect.Value, recur, zeros bool) {
for _, ni := range dm.nested { for _, ni := range dm.nested {
f := v.Field(ni) f := v.Field(ni)
if f.IsNil() { // f is *T or []*T or map[T]*T
continue switch f.Kind() {
} case reflect.Ptr:
// f is *T or []*T if f.IsNil() {
if f.Kind() == reflect.Ptr { continue
}
setDefaults(f, recur, zeros) setDefaults(f, recur, zeros)
} else {
case reflect.Slice:
for i := 0; i < f.Len(); i++ { for i := 0; i < f.Len(); i++ {
e := f.Index(i) e := f.Index(i)
if e.IsNil() { if e.IsNil() {
@@ -613,6 +673,15 @@ func setDefaults(v reflect.Value, recur, zeros bool) {
} }
setDefaults(e, recur, zeros) setDefaults(e, recur, zeros)
} }
case reflect.Map:
for _, k := range f.MapKeys() {
e := f.MapIndex(k)
if e.IsNil() {
continue
}
setDefaults(e, recur, zeros)
}
} }
} }
} }
@@ -638,10 +707,6 @@ type scalarField struct {
value interface{} // the proto-declared default value, or nil value interface{} // the proto-declared default value, or nil
} }
func ptrToStruct(t reflect.Type) bool {
return t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct
}
// t is a struct type. // t is a struct type.
func buildDefaultMessage(t reflect.Type) (dm defaultMessage) { func buildDefaultMessage(t reflect.Type) (dm defaultMessage) {
sprop := GetProperties(t) sprop := GetProperties(t)
@@ -653,99 +718,177 @@ func buildDefaultMessage(t reflect.Type) (dm defaultMessage) {
} }
ft := t.Field(fi).Type ft := t.Field(fi).Type
// nested messages sf, nested, err := fieldDefault(ft, prop)
if ptrToStruct(ft) || (ft.Kind() == reflect.Slice && ptrToStruct(ft.Elem())) { switch {
case err != nil:
log.Print(err)
case nested:
dm.nested = append(dm.nested, fi) dm.nested = append(dm.nested, fi)
continue case sf != nil:
sf.index = fi
dm.scalars = append(dm.scalars, *sf)
} }
sf := scalarField{
index: fi,
kind: ft.Elem().Kind(),
}
// scalar fields without defaults
if !prop.HasDefault {
dm.scalars = append(dm.scalars, sf)
continue
}
// a scalar field: either *T or []byte
switch ft.Elem().Kind() {
case reflect.Bool:
x, err := strconv.ParseBool(prop.Default)
if err != nil {
log.Printf("proto: bad default bool %q: %v", prop.Default, err)
continue
}
sf.value = x
case reflect.Float32:
x, err := strconv.ParseFloat(prop.Default, 32)
if err != nil {
log.Printf("proto: bad default float32 %q: %v", prop.Default, err)
continue
}
sf.value = float32(x)
case reflect.Float64:
x, err := strconv.ParseFloat(prop.Default, 64)
if err != nil {
log.Printf("proto: bad default float64 %q: %v", prop.Default, err)
continue
}
sf.value = x
case reflect.Int32:
x, err := strconv.ParseInt(prop.Default, 10, 32)
if err != nil {
log.Printf("proto: bad default int32 %q: %v", prop.Default, err)
continue
}
sf.value = int32(x)
case reflect.Int64:
x, err := strconv.ParseInt(prop.Default, 10, 64)
if err != nil {
log.Printf("proto: bad default int64 %q: %v", prop.Default, err)
continue
}
sf.value = x
case reflect.String:
sf.value = prop.Default
case reflect.Uint8:
// []byte (not *uint8)
sf.value = []byte(prop.Default)
case reflect.Uint32:
x, err := strconv.ParseUint(prop.Default, 10, 32)
if err != nil {
log.Printf("proto: bad default uint32 %q: %v", prop.Default, err)
continue
}
sf.value = uint32(x)
case reflect.Uint64:
x, err := strconv.ParseUint(prop.Default, 10, 64)
if err != nil {
log.Printf("proto: bad default uint64 %q: %v", prop.Default, err)
continue
}
sf.value = x
default:
log.Printf("proto: unhandled def kind %v", ft.Elem().Kind())
continue
}
dm.scalars = append(dm.scalars, sf)
} }
return dm return dm
} }
// fieldDefault returns the scalarField for field type ft.
// sf will be nil if the field can not have a default.
// nestedMessage will be true if this is a nested message.
// Note that sf.index is not set on return.
func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) {
var canHaveDefault bool
switch ft.Kind() {
case reflect.Ptr:
if ft.Elem().Kind() == reflect.Struct {
nestedMessage = true
} else {
canHaveDefault = true // proto2 scalar field
}
case reflect.Slice:
switch ft.Elem().Kind() {
case reflect.Ptr:
nestedMessage = true // repeated message
case reflect.Uint8:
canHaveDefault = true // bytes field
}
case reflect.Map:
if ft.Elem().Kind() == reflect.Ptr {
nestedMessage = true // map with message values
}
}
if !canHaveDefault {
if nestedMessage {
return nil, true, nil
}
return nil, false, nil
}
// We now know that ft is a pointer or slice.
sf = &scalarField{kind: ft.Elem().Kind()}
// scalar fields without defaults
if !prop.HasDefault {
return sf, false, nil
}
// a scalar field: either *T or []byte
switch ft.Elem().Kind() {
case reflect.Bool:
x, err := strconv.ParseBool(prop.Default)
if err != nil {
return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err)
}
sf.value = x
case reflect.Float32:
x, err := strconv.ParseFloat(prop.Default, 32)
if err != nil {
return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err)
}
sf.value = float32(x)
case reflect.Float64:
x, err := strconv.ParseFloat(prop.Default, 64)
if err != nil {
return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err)
}
sf.value = x
case reflect.Int32:
x, err := strconv.ParseInt(prop.Default, 10, 32)
if err != nil {
return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err)
}
sf.value = int32(x)
case reflect.Int64:
x, err := strconv.ParseInt(prop.Default, 10, 64)
if err != nil {
return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err)
}
sf.value = x
case reflect.String:
sf.value = prop.Default
case reflect.Uint8:
// []byte (not *uint8)
sf.value = []byte(prop.Default)
case reflect.Uint32:
x, err := strconv.ParseUint(prop.Default, 10, 32)
if err != nil {
return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err)
}
sf.value = uint32(x)
case reflect.Uint64:
x, err := strconv.ParseUint(prop.Default, 10, 64)
if err != nil {
return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err)
}
sf.value = x
default:
return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind())
}
return sf, false, nil
}
// Map fields may have key types of non-float scalars, strings and enums. // Map fields may have key types of non-float scalars, strings and enums.
// The easiest way to sort them in some deterministic order is to use fmt. // The easiest way to sort them in some deterministic order is to use fmt.
// If this turns out to be inefficient we can always consider other options, // If this turns out to be inefficient we can always consider other options,
// such as doing a Schwartzian transform. // such as doing a Schwartzian transform.
type mapKeys []reflect.Value func mapKeys(vs []reflect.Value) sort.Interface {
s := mapKeySorter{
vs: vs,
// default Less function: textual comparison
less: func(a, b reflect.Value) bool {
return fmt.Sprint(a.Interface()) < fmt.Sprint(b.Interface())
},
}
func (s mapKeys) Len() int { return len(s) } // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps;
func (s mapKeys) Swap(i, j int) { s[i], s[j] = s[j], s[i] } // numeric keys are sorted numerically.
func (s mapKeys) Less(i, j int) bool { if len(vs) == 0 {
return fmt.Sprint(s[i].Interface()) < fmt.Sprint(s[j].Interface()) return s
}
switch vs[0].Kind() {
case reflect.Int32, reflect.Int64:
s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() }
case reflect.Uint32, reflect.Uint64:
s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() }
}
return s
} }
type mapKeySorter struct {
vs []reflect.Value
less func(a, b reflect.Value) bool
}
func (s mapKeySorter) Len() int { return len(s.vs) }
func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] }
func (s mapKeySorter) Less(i, j int) bool {
return s.less(s.vs[i], s.vs[j])
}
// isProto3Zero reports whether v is a zero proto3 value.
func isProto3Zero(v reflect.Value) bool {
switch v.Kind() {
case reflect.Bool:
return !v.Bool()
case reflect.Int32, reflect.Int64:
return v.Int() == 0
case reflect.Uint32, reflect.Uint64:
return v.Uint() == 0
case reflect.Float32, reflect.Float64:
return v.Float() == 0
case reflect.String:
return v.String() == ""
}
return false
}
// ProtoPackageIsVersion1 is referenced from generated protocol buffer files
// to assert that that code is compatible with this version of the proto package.
const ProtoPackageIsVersion1 = true

View File

@@ -44,11 +44,11 @@ import (
"sort" "sort"
) )
// ErrNoMessageTypeId occurs when a protocol buffer does not have a message type ID. // errNoMessageTypeID occurs when a protocol buffer does not have a message type ID.
// A message type ID is required for storing a protocol buffer in a message set. // A message type ID is required for storing a protocol buffer in a message set.
var ErrNoMessageTypeId = errors.New("proto does not have a message type ID") var errNoMessageTypeID = errors.New("proto does not have a message type ID")
// The first two types (_MessageSet_Item and MessageSet) // The first two types (_MessageSet_Item and messageSet)
// model what the protocol compiler produces for the following protocol message: // model what the protocol compiler produces for the following protocol message:
// message MessageSet { // message MessageSet {
// repeated group Item = 1 { // repeated group Item = 1 {
@@ -58,27 +58,20 @@ var ErrNoMessageTypeId = errors.New("proto does not have a message type ID")
// } // }
// That is the MessageSet wire format. We can't use a proto to generate these // That is the MessageSet wire format. We can't use a proto to generate these
// because that would introduce a circular dependency between it and this package. // because that would introduce a circular dependency between it and this package.
//
// When a proto1 proto has a field that looks like:
// optional message<MessageSet> info = 3;
// the protocol compiler produces a field in the generated struct that looks like:
// Info *_proto_.MessageSet `protobuf:"bytes,3,opt,name=info"`
// The package is automatically inserted so there is no need for that proto file to
// import this package.
type _MessageSet_Item struct { type _MessageSet_Item struct {
TypeId *int32 `protobuf:"varint,2,req,name=type_id"` TypeId *int32 `protobuf:"varint,2,req,name=type_id"`
Message []byte `protobuf:"bytes,3,req,name=message"` Message []byte `protobuf:"bytes,3,req,name=message"`
} }
type MessageSet struct { type messageSet struct {
Item []*_MessageSet_Item `protobuf:"group,1,rep"` Item []*_MessageSet_Item `protobuf:"group,1,rep"`
XXX_unrecognized []byte XXX_unrecognized []byte
// TODO: caching? // TODO: caching?
} }
// Make sure MessageSet is a Message. // Make sure messageSet is a Message.
var _ Message = (*MessageSet)(nil) var _ Message = (*messageSet)(nil)
// messageTypeIder is an interface satisfied by a protocol buffer type // messageTypeIder is an interface satisfied by a protocol buffer type
// that may be stored in a MessageSet. // that may be stored in a MessageSet.
@@ -86,7 +79,7 @@ type messageTypeIder interface {
MessageTypeId() int32 MessageTypeId() int32
} }
func (ms *MessageSet) find(pb Message) *_MessageSet_Item { func (ms *messageSet) find(pb Message) *_MessageSet_Item {
mti, ok := pb.(messageTypeIder) mti, ok := pb.(messageTypeIder)
if !ok { if !ok {
return nil return nil
@@ -100,24 +93,24 @@ func (ms *MessageSet) find(pb Message) *_MessageSet_Item {
return nil return nil
} }
func (ms *MessageSet) Has(pb Message) bool { func (ms *messageSet) Has(pb Message) bool {
if ms.find(pb) != nil { if ms.find(pb) != nil {
return true return true
} }
return false return false
} }
func (ms *MessageSet) Unmarshal(pb Message) error { func (ms *messageSet) Unmarshal(pb Message) error {
if item := ms.find(pb); item != nil { if item := ms.find(pb); item != nil {
return Unmarshal(item.Message, pb) return Unmarshal(item.Message, pb)
} }
if _, ok := pb.(messageTypeIder); !ok { if _, ok := pb.(messageTypeIder); !ok {
return ErrNoMessageTypeId return errNoMessageTypeID
} }
return nil // TODO: return error instead? return nil // TODO: return error instead?
} }
func (ms *MessageSet) Marshal(pb Message) error { func (ms *messageSet) Marshal(pb Message) error {
msg, err := Marshal(pb) msg, err := Marshal(pb)
if err != nil { if err != nil {
return err return err
@@ -130,7 +123,7 @@ func (ms *MessageSet) Marshal(pb Message) error {
mti, ok := pb.(messageTypeIder) mti, ok := pb.(messageTypeIder)
if !ok { if !ok {
return ErrNoMessageTypeId return errNoMessageTypeID
} }
mtid := mti.MessageTypeId() mtid := mti.MessageTypeId()
@@ -141,9 +134,9 @@ func (ms *MessageSet) Marshal(pb Message) error {
return nil return nil
} }
func (ms *MessageSet) Reset() { *ms = MessageSet{} } func (ms *messageSet) Reset() { *ms = messageSet{} }
func (ms *MessageSet) String() string { return CompactTextString(ms) } func (ms *messageSet) String() string { return CompactTextString(ms) }
func (*MessageSet) ProtoMessage() {} func (*messageSet) ProtoMessage() {}
// Support for the message_set_wire_format message option. // Support for the message_set_wire_format message option.
@@ -169,7 +162,7 @@ func MarshalMessageSet(m map[int32]Extension) ([]byte, error) {
} }
sort.Ints(ids) sort.Ints(ids)
ms := &MessageSet{Item: make([]*_MessageSet_Item, 0, len(m))} ms := &messageSet{Item: make([]*_MessageSet_Item, 0, len(m))}
for _, id := range ids { for _, id := range ids {
e := m[int32(id)] e := m[int32(id)]
// Remove the wire type and field number varint, as well as the length varint. // Remove the wire type and field number varint, as well as the length varint.
@@ -186,7 +179,7 @@ func MarshalMessageSet(m map[int32]Extension) ([]byte, error) {
// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. // UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option. // It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
func UnmarshalMessageSet(buf []byte, m map[int32]Extension) error { func UnmarshalMessageSet(buf []byte, m map[int32]Extension) error {
ms := new(MessageSet) ms := new(messageSet)
if err := Unmarshal(buf, ms); err != nil { if err := Unmarshal(buf, ms); err != nil {
return err return err
} }

View File

@@ -29,7 +29,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// +build appengine,!appenginevm // +build appengine
// This file contains an implementation of proto field accesses using package reflect. // This file contains an implementation of proto field accesses using package reflect.
// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can // It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
@@ -144,8 +144,8 @@ func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
return structPointer_ifield(p, f).(*map[int32]Extension) return structPointer_ifield(p, f).(*map[int32]Extension)
} }
// Map returns the reflect.Value for the address of a map field in the struct. // NewAt returns the reflect.Value for a pointer to a field in the struct.
func structPointer_Map(p structPointer, f field, typ reflect.Type) reflect.Value { func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value {
return structPointer_field(p, f).Addr() return structPointer_field(p, f).Addr()
} }

View File

@@ -29,7 +29,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// +build !appengine appenginevm // +build !appengine
// This file contains the implementation of the proto field accesses using package unsafe. // This file contains the implementation of the proto field accesses using package unsafe.
@@ -130,8 +130,8 @@ func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f))) return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f)))
} }
// Map returns the reflect.Value for the address of a map field in the struct. // NewAt returns the reflect.Value for a pointer to a field in the struct.
func structPointer_Map(p structPointer, f field, typ reflect.Type) reflect.Value { func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value {
return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f))) return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f)))
} }

View File

@@ -37,6 +37,7 @@ package proto
import ( import (
"fmt" "fmt"
"log"
"os" "os"
"reflect" "reflect"
"sort" "sort"
@@ -84,6 +85,15 @@ type decoder func(p *Buffer, prop *Properties, base structPointer) error
// A valueDecoder decodes a single integer in a particular encoding. // A valueDecoder decodes a single integer in a particular encoding.
type valueDecoder func(o *Buffer) (x uint64, err error) type valueDecoder func(o *Buffer) (x uint64, err error)
// A oneofMarshaler does the marshaling for all oneof fields in a message.
type oneofMarshaler func(Message, *Buffer) error
// A oneofUnmarshaler does the unmarshaling for a oneof field in a message.
type oneofUnmarshaler func(Message, int, int, *Buffer) (bool, error)
// A oneofSizer does the sizing for all oneof fields in a message.
type oneofSizer func(Message) int
// tagMap is an optimization over map[int]int for typical protocol buffer // tagMap is an optimization over map[int]int for typical protocol buffer
// use-cases. Encoded protocol buffers are often in tag order with small tag // use-cases. Encoded protocol buffers are often in tag order with small tag
// numbers. // numbers.
@@ -132,6 +142,22 @@ type StructProperties struct {
order []int // list of struct field numbers in tag order order []int // list of struct field numbers in tag order
unrecField field // field id of the XXX_unrecognized []byte field unrecField field // field id of the XXX_unrecognized []byte field
extendable bool // is this an extendable proto extendable bool // is this an extendable proto
oneofMarshaler oneofMarshaler
oneofUnmarshaler oneofUnmarshaler
oneofSizer oneofSizer
stype reflect.Type
// OneofTypes contains information about the oneof fields in this message.
// It is keyed by the original name of a field.
OneofTypes map[string]*OneofProperties
}
// OneofProperties represents information about a specific field in a oneof.
type OneofProperties struct {
Type reflect.Type // pointer to generated struct type for this oneof field
Field int // struct field number of the containing oneof in the message
Prop *Properties
} }
// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec. // Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec.
@@ -156,6 +182,7 @@ type Properties struct {
Packed bool // relevant for repeated primitives only Packed bool // relevant for repeated primitives only
Enum string // set for enum types only Enum string // set for enum types only
proto3 bool // whether this is known to be a proto3 field; set for []byte only proto3 bool // whether this is known to be a proto3 field; set for []byte only
oneof bool // whether this is a oneof field
Default string // default value Default string // default value
HasDefault bool // whether an explicit default was provided HasDefault bool // whether an explicit default was provided
@@ -208,6 +235,9 @@ func (p *Properties) String() string {
if p.proto3 { if p.proto3 {
s += ",proto3" s += ",proto3"
} }
if p.oneof {
s += ",oneof"
}
if len(p.Enum) > 0 { if len(p.Enum) > 0 {
s += ",enum=" + p.Enum s += ",enum=" + p.Enum
} }
@@ -284,6 +314,8 @@ func (p *Properties) Parse(s string) {
p.Enum = f[5:] p.Enum = f[5:]
case f == "proto3": case f == "proto3":
p.proto3 = true p.proto3 = true
case f == "oneof":
p.oneof = true
case strings.HasPrefix(f, "def="): case strings.HasPrefix(f, "def="):
p.HasDefault = true p.HasDefault = true
p.Default = f[4:] // rest of string p.Default = f[4:] // rest of string
@@ -440,7 +472,12 @@ func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lock
p.enc = (*Buffer).enc_slice_byte p.enc = (*Buffer).enc_slice_byte
p.dec = (*Buffer).dec_slice_byte p.dec = (*Buffer).dec_slice_byte
p.size = size_slice_byte p.size = size_slice_byte
if p.proto3 { // This is a []byte, which is either a bytes field,
// or the value of a map field. In the latter case,
// we always encode an empty []byte, so we should not
// use the proto3 enc/size funcs.
// f == nil iff this is the key/value of a map field.
if p.proto3 && f != nil {
p.enc = (*Buffer).enc_proto3_slice_byte p.enc = (*Buffer).enc_proto3_slice_byte
p.size = size_proto3_slice_byte p.size = size_proto3_slice_byte
} }
@@ -595,7 +632,7 @@ func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructF
} }
var ( var (
mutex sync.Mutex propertiesMu sync.RWMutex
propertiesMap = make(map[reflect.Type]*StructProperties) propertiesMap = make(map[reflect.Type]*StructProperties)
) )
@@ -605,13 +642,26 @@ func GetProperties(t reflect.Type) *StructProperties {
if t.Kind() != reflect.Struct { if t.Kind() != reflect.Struct {
panic("proto: type must have kind struct") panic("proto: type must have kind struct")
} }
mutex.Lock()
sprop := getPropertiesLocked(t) // Most calls to GetProperties in a long-running program will be
mutex.Unlock() // retrieving details for types we have seen before.
propertiesMu.RLock()
sprop, ok := propertiesMap[t]
propertiesMu.RUnlock()
if ok {
if collectStats {
stats.Chit++
}
return sprop
}
propertiesMu.Lock()
sprop = getPropertiesLocked(t)
propertiesMu.Unlock()
return sprop return sprop
} }
// getPropertiesLocked requires that mutex is held. // getPropertiesLocked requires that propertiesMu is held.
func getPropertiesLocked(t reflect.Type) *StructProperties { func getPropertiesLocked(t reflect.Type) *StructProperties {
if prop, ok := propertiesMap[t]; ok { if prop, ok := propertiesMap[t]; ok {
if collectStats { if collectStats {
@@ -647,6 +697,7 @@ func getPropertiesLocked(t reflect.Type) *StructProperties {
if f.Name == "XXX_unrecognized" { // special case if f.Name == "XXX_unrecognized" { // special case
prop.unrecField = toField(&f) prop.unrecField = toField(&f)
} }
oneof := f.Tag.Get("protobuf_oneof") != "" // special case
prop.Prop[i] = p prop.Prop[i] = p
prop.order[i] = i prop.order[i] = i
if debug { if debug {
@@ -656,7 +707,7 @@ func getPropertiesLocked(t reflect.Type) *StructProperties {
} }
print("\n") print("\n")
} }
if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") { if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && !oneof {
fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]") fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]")
} }
} }
@@ -664,6 +715,41 @@ func getPropertiesLocked(t reflect.Type) *StructProperties {
// Re-order prop.order. // Re-order prop.order.
sort.Sort(prop) sort.Sort(prop)
type oneofMessage interface {
XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
}
if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok {
var oots []interface{}
prop.oneofMarshaler, prop.oneofUnmarshaler, prop.oneofSizer, oots = om.XXX_OneofFuncs()
prop.stype = t
// Interpret oneof metadata.
prop.OneofTypes = make(map[string]*OneofProperties)
for _, oot := range oots {
oop := &OneofProperties{
Type: reflect.ValueOf(oot).Type(), // *T
Prop: new(Properties),
}
sft := oop.Type.Elem().Field(0)
oop.Prop.Name = sft.Name
oop.Prop.Parse(sft.Tag.Get("protobuf"))
// There will be exactly one interface field that
// this new value is assignable to.
for i := 0; i < t.NumField(); i++ {
f := t.Field(i)
if f.Type.Kind() != reflect.Interface {
continue
}
if !oop.Type.AssignableTo(f.Type) {
continue
}
oop.Field = i
break
}
prop.OneofTypes[oop.Prop.OrigName] = oop
}
}
// build required counts // build required counts
// build tags // build tags
reqCount := 0 reqCount := 0
@@ -722,3 +808,35 @@ func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[
} }
enumValueMaps[typeName] = valueMap enumValueMaps[typeName] = valueMap
} }
// EnumValueMap returns the mapping from names to integers of the
// enum type enumType, or a nil if not found.
func EnumValueMap(enumType string) map[string]int32 {
return enumValueMaps[enumType]
}
// A registry of all linked message types.
// The string is a fully-qualified proto name ("pkg.Message").
var (
protoTypes = make(map[string]reflect.Type)
revProtoTypes = make(map[reflect.Type]string)
)
// RegisterType is called from generated code and maps from the fully qualified
// proto name to the type (pointer to struct) of the protocol buffer.
func RegisterType(x Message, name string) {
if _, ok := protoTypes[name]; ok {
// TODO: Some day, make this a panic.
log.Printf("proto: duplicate proto type registered: %s", name)
return
}
t := reflect.TypeOf(x)
protoTypes[name] = t
revProtoTypes[t] = name
}
// MessageName returns the fully-qualified proto name for the given message type.
func MessageName(x Message) string { return revProtoTypes[reflect.TypeOf(x)] }
// MessageType returns the message type (pointer to struct) for a named message.
func MessageType(name string) reflect.Type { return protoTypes[name] }

View File

@@ -1,44 +0,0 @@
# Go support for Protocol Buffers - Google's data interchange format
#
# Copyright 2014 The Go Authors. All rights reserved.
# https://github.com/golang/protobuf
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
include ../../Make.protobuf
all: regenerate
regenerate:
rm -f proto3.pb.go
make proto3.pb.go
# The following rules are just aids to development. Not needed for typical testing.
diff: regenerate
git diff proto3.pb.go

View File

@@ -0,0 +1,122 @@
// Code generated by protoc-gen-go.
// source: proto3_proto/proto3.proto
// DO NOT EDIT!
/*
Package proto3_proto is a generated protocol buffer package.
It is generated from these files:
proto3_proto/proto3.proto
It has these top-level messages:
Message
Nested
MessageWithMap
*/
package proto3_proto
import proto "github.com/golang/protobuf/proto"
import testdata "github.com/golang/protobuf/proto/testdata"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
type Message_Humour int32
const (
Message_UNKNOWN Message_Humour = 0
Message_PUNS Message_Humour = 1
Message_SLAPSTICK Message_Humour = 2
Message_BILL_BAILEY Message_Humour = 3
)
var Message_Humour_name = map[int32]string{
0: "UNKNOWN",
1: "PUNS",
2: "SLAPSTICK",
3: "BILL_BAILEY",
}
var Message_Humour_value = map[string]int32{
"UNKNOWN": 0,
"PUNS": 1,
"SLAPSTICK": 2,
"BILL_BAILEY": 3,
}
func (x Message_Humour) String() string {
return proto.EnumName(Message_Humour_name, int32(x))
}
type Message struct {
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
Hilarity Message_Humour `protobuf:"varint,2,opt,name=hilarity,enum=proto3_proto.Message_Humour" json:"hilarity,omitempty"`
HeightInCm uint32 `protobuf:"varint,3,opt,name=height_in_cm" json:"height_in_cm,omitempty"`
Data []byte `protobuf:"bytes,4,opt,name=data,proto3" json:"data,omitempty"`
ResultCount int64 `protobuf:"varint,7,opt,name=result_count" json:"result_count,omitempty"`
TrueScotsman bool `protobuf:"varint,8,opt,name=true_scotsman" json:"true_scotsman,omitempty"`
Score float32 `protobuf:"fixed32,9,opt,name=score" json:"score,omitempty"`
Key []uint64 `protobuf:"varint,5,rep,name=key" json:"key,omitempty"`
Nested *Nested `protobuf:"bytes,6,opt,name=nested" json:"nested,omitempty"`
Terrain map[string]*Nested `protobuf:"bytes,10,rep,name=terrain" json:"terrain,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
Proto2Field *testdata.SubDefaults `protobuf:"bytes,11,opt,name=proto2_field" json:"proto2_field,omitempty"`
Proto2Value map[string]*testdata.SubDefaults `protobuf:"bytes,13,rep,name=proto2_value" json:"proto2_value,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
}
func (m *Message) Reset() { *m = Message{} }
func (m *Message) String() string { return proto.CompactTextString(m) }
func (*Message) ProtoMessage() {}
func (m *Message) GetNested() *Nested {
if m != nil {
return m.Nested
}
return nil
}
func (m *Message) GetTerrain() map[string]*Nested {
if m != nil {
return m.Terrain
}
return nil
}
func (m *Message) GetProto2Field() *testdata.SubDefaults {
if m != nil {
return m.Proto2Field
}
return nil
}
func (m *Message) GetProto2Value() map[string]*testdata.SubDefaults {
if m != nil {
return m.Proto2Value
}
return nil
}
type Nested struct {
Bunny string `protobuf:"bytes,1,opt,name=bunny" json:"bunny,omitempty"`
}
func (m *Nested) Reset() { *m = Nested{} }
func (m *Nested) String() string { return proto.CompactTextString(m) }
func (*Nested) ProtoMessage() {}
type MessageWithMap struct {
ByteMapping map[bool][]byte `protobuf:"bytes,1,rep,name=byte_mapping" json:"byte_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value,proto3"`
}
func (m *MessageWithMap) Reset() { *m = MessageWithMap{} }
func (m *MessageWithMap) String() string { return proto.CompactTextString(m) }
func (*MessageWithMap) ProtoMessage() {}
func (m *MessageWithMap) GetByteMapping() map[bool][]byte {
if m != nil {
return m.ByteMapping
}
return nil
}
func init() {
proto.RegisterEnum("proto3_proto.Message_Humour", Message_Humour_name, Message_Humour_value)
}

View File

@@ -31,6 +31,8 @@
syntax = "proto3"; syntax = "proto3";
import "testdata/test.proto";
package proto3_proto; package proto3_proto;
message Message { message Message {
@@ -51,8 +53,16 @@ message Message {
repeated uint64 key = 5; repeated uint64 key = 5;
Nested nested = 6; Nested nested = 6;
map<string, Nested> terrain = 10;
testdata.SubDefaults proto2_field = 11;
map<string, testdata.SubDefaults> proto2_value = 13;
} }
message Nested { message Nested {
string bunny = 1; string bunny = 1;
} }
message MessageWithMap {
map<bool, bytes> byte_mapping = 1;
}

View File

@@ -37,11 +37,11 @@ import (
"bufio" "bufio"
"bytes" "bytes"
"encoding" "encoding"
"errors"
"fmt" "fmt"
"io" "io"
"log" "log"
"math" "math"
"os"
"reflect" "reflect"
"sort" "sort"
"strings" "strings"
@@ -170,20 +170,12 @@ func writeName(w *textWriter, props *Properties) error {
return nil return nil
} }
var (
messageSetType = reflect.TypeOf((*MessageSet)(nil)).Elem()
)
// raw is the interface satisfied by RawMessage. // raw is the interface satisfied by RawMessage.
type raw interface { type raw interface {
Bytes() []byte Bytes() []byte
} }
func writeStruct(w *textWriter, sv reflect.Value) error { func writeStruct(w *textWriter, sv reflect.Value) error {
if sv.Type() == messageSetType {
return writeMessageSet(w, sv.Addr().Interface().(*MessageSet))
}
st := sv.Type() st := sv.Type()
sprops := GetProperties(st) sprops := GetProperties(st)
for i := 0; i < sv.NumField(); i++ { for i := 0; i < sv.NumField(); i++ {
@@ -246,7 +238,7 @@ func writeStruct(w *textWriter, sv reflect.Value) error {
} }
if fv.Kind() == reflect.Map { if fv.Kind() == reflect.Map {
// Map fields are rendered as a repeated struct with key/value fields. // Map fields are rendered as a repeated struct with key/value fields.
keys := fv.MapKeys() // TODO: should we sort these for deterministic output? keys := fv.MapKeys()
sort.Sort(mapKeys(keys)) sort.Sort(mapKeys(keys))
for _, key := range keys { for _, key := range keys {
val := fv.MapIndex(key) val := fv.MapIndex(key)
@@ -283,20 +275,23 @@ func writeStruct(w *textWriter, sv reflect.Value) error {
if err := w.WriteByte('\n'); err != nil { if err := w.WriteByte('\n'); err != nil {
return err return err
} }
// value // nil values aren't legal, but we can avoid panicking because of them.
if _, err := w.WriteString("value:"); err != nil { if val.Kind() != reflect.Ptr || !val.IsNil() {
return err // value
} if _, err := w.WriteString("value:"); err != nil {
if !w.compact { return err
if err := w.WriteByte(' '); err != nil { }
if !w.compact {
if err := w.WriteByte(' '); err != nil {
return err
}
}
if err := writeAny(w, val, props.mvalprop); err != nil {
return err
}
if err := w.WriteByte('\n'); err != nil {
return err return err
} }
}
if err := writeAny(w, val, props.mvalprop); err != nil {
return err
}
if err := w.WriteByte('\n'); err != nil {
return err
} }
// close struct // close struct
w.unindent() w.unindent()
@@ -315,26 +310,34 @@ func writeStruct(w *textWriter, sv reflect.Value) error {
} }
if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice { if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice {
// proto3 non-repeated scalar field; skip if zero value // proto3 non-repeated scalar field; skip if zero value
switch fv.Kind() { if isProto3Zero(fv) {
case reflect.Bool: continue
if !fv.Bool() { }
}
if fv.Kind() == reflect.Interface {
// Check if it is a oneof.
if st.Field(i).Tag.Get("protobuf_oneof") != "" {
// fv is nil, or holds a pointer to generated struct.
// That generated struct has exactly one field,
// which has a protobuf struct tag.
if fv.IsNil() {
continue continue
} }
case reflect.Int32, reflect.Int64: inner := fv.Elem().Elem() // interface -> *T -> T
if fv.Int() == 0 { tag := inner.Type().Field(0).Tag.Get("protobuf")
continue props = new(Properties) // Overwrite the outer props var, but not its pointee.
} props.Parse(tag)
case reflect.Uint32, reflect.Uint64: // Write the value in the oneof, not the oneof itself.
if fv.Uint() == 0 { fv = inner.Field(0)
continue
} // Special case to cope with malformed messages gracefully:
case reflect.Float32, reflect.Float64: // If the value in the oneof is a nil pointer, don't panic
if fv.Float() == 0 { // in writeAny.
continue if fv.Kind() == reflect.Ptr && fv.IsNil() {
} // Use errors.New so writeAny won't render quotes.
case reflect.String: msg := errors.New("/* nil */")
if fv.String() == "" { fv = reflect.ValueOf(&msg).Elem()
continue
} }
} }
} }
@@ -514,44 +517,6 @@ func writeString(w *textWriter, s string) error {
return w.WriteByte('"') return w.WriteByte('"')
} }
func writeMessageSet(w *textWriter, ms *MessageSet) error {
for _, item := range ms.Item {
id := *item.TypeId
if msd, ok := messageSetMap[id]; ok {
// Known message set type.
if _, err := fmt.Fprintf(w, "[%s]: <\n", msd.name); err != nil {
return err
}
w.indent()
pb := reflect.New(msd.t.Elem())
if err := Unmarshal(item.Message, pb.Interface().(Message)); err != nil {
if _, err := fmt.Fprintf(w, "/* bad message: %v */\n", err); err != nil {
return err
}
} else {
if err := writeStruct(w, pb.Elem()); err != nil {
return err
}
}
} else {
// Unknown type.
if _, err := fmt.Fprintf(w, "[%d]: <\n", id); err != nil {
return err
}
w.indent()
if err := writeUnknownStruct(w, item.Message); err != nil {
return err
}
}
w.unindent()
if _, err := w.Write(gtNewline); err != nil {
return err
}
}
return nil
}
func writeUnknownStruct(w *textWriter, data []byte) (err error) { func writeUnknownStruct(w *textWriter, data []byte) (err error) {
if !w.compact { if !w.compact {
if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil { if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil {
@@ -666,10 +631,7 @@ func writeExtensions(w *textWriter, pv reflect.Value) error {
pb, err := GetExtension(ep, desc) pb, err := GetExtension(ep, desc)
if err != nil { if err != nil {
if _, err := fmt.Fprintln(os.Stderr, "proto: failed getting extension: ", err); err != nil { return fmt.Errorf("failed getting extension: %v", err)
return err
}
continue
} }
// Repeated extensions will appear as a slice. // Repeated extensions will appear as a slice.

View File

@@ -119,6 +119,14 @@ func isWhitespace(c byte) bool {
return false return false
} }
func isQuote(c byte) bool {
switch c {
case '"', '\'':
return true
}
return false
}
func (p *textParser) skipWhitespace() { func (p *textParser) skipWhitespace() {
i := 0 i := 0
for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
@@ -174,7 +182,7 @@ func (p *textParser) advance() {
} }
unq, err := unquoteC(p.s[1:i], rune(p.s[0])) unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
if err != nil { if err != nil {
p.errorf("invalid quoted string %v", p.s[0:i+1]) p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err)
return return
} }
p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
@@ -333,13 +341,13 @@ func (p *textParser) next() *token {
p.advance() p.advance()
if p.done { if p.done {
p.cur.value = "" p.cur.value = ""
} else if len(p.cur.value) > 0 && p.cur.value[0] == '"' { } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) {
// Look for multiple quoted strings separated by whitespace, // Look for multiple quoted strings separated by whitespace,
// and concatenate them. // and concatenate them.
cat := p.cur cat := p.cur
for { for {
p.skipWhitespace() p.skipWhitespace()
if p.done || p.s[0] != '"' { if p.done || !isQuote(p.s[0]) {
break break
} }
p.advance() p.advance()
@@ -385,8 +393,7 @@ func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSet
} }
// Returns the index in the struct for the named field, as well as the parsed tag properties. // Returns the index in the struct for the named field, as well as the parsed tag properties.
func structFieldByName(st reflect.Type, name string) (int, *Properties, bool) { func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) {
sprops := GetProperties(st)
i, ok := sprops.decoderOrigNames[name] i, ok := sprops.decoderOrigNames[name]
if ok { if ok {
return i, sprops.Prop[i], true return i, sprops.Prop[i], true
@@ -438,7 +445,8 @@ func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseEr
func (p *textParser) readStruct(sv reflect.Value, terminator string) error { func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
st := sv.Type() st := sv.Type()
reqCount := GetProperties(st).reqCount sprops := GetProperties(st)
reqCount := sprops.reqCount
var reqFieldErr error var reqFieldErr error
fieldSet := make(map[string]bool) fieldSet := make(map[string]bool)
// A struct is a sequence of "name: value", terminated by one of // A struct is a sequence of "name: value", terminated by one of
@@ -520,99 +528,113 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
sl = reflect.Append(sl, ext) sl = reflect.Append(sl, ext)
SetExtension(ep, desc, sl.Interface()) SetExtension(ep, desc, sl.Interface())
} }
} else { if err := p.consumeOptionalSeparator(); err != nil {
// This is a normal, non-extension field. return err
name := tok.value
fi, props, ok := structFieldByName(st, name)
if !ok {
return p.errorf("unknown field name %q in %v", name, st)
} }
continue
}
dst := sv.Field(fi) // This is a normal, non-extension field.
name := tok.value
var dst reflect.Value
fi, props, ok := structFieldByName(sprops, name)
if ok {
dst = sv.Field(fi)
} else if oop, ok := sprops.OneofTypes[name]; ok {
// It is a oneof.
props = oop.Prop
nv := reflect.New(oop.Type.Elem())
dst = nv.Elem().Field(0)
sv.Field(oop.Field).Set(nv)
}
if !dst.IsValid() {
return p.errorf("unknown field name %q in %v", name, st)
}
if dst.Kind() == reflect.Map { if dst.Kind() == reflect.Map {
// Consume any colon. // Consume any colon.
if err := p.checkForColon(props, dst.Type()); err != nil { if err := p.checkForColon(props, dst.Type()); err != nil {
return err
}
// Construct the map if it doesn't already exist.
if dst.IsNil() {
dst.Set(reflect.MakeMap(dst.Type()))
}
key := reflect.New(dst.Type().Key()).Elem()
val := reflect.New(dst.Type().Elem()).Elem()
// The map entry should be this sequence of tokens:
// < key : KEY value : VALUE >
// Technically the "key" and "value" could come in any order,
// but in practice they won't.
tok := p.next()
var terminator string
switch tok.value {
case "<":
terminator = ">"
case "{":
terminator = "}"
default:
return p.errorf("expected '{' or '<', found %q", tok.value)
}
if err := p.consumeToken("key"); err != nil {
return err
}
if err := p.consumeToken(":"); err != nil {
return err
}
if err := p.readAny(key, props.mkeyprop); err != nil {
return err
}
if err := p.consumeToken("value"); err != nil {
return err
}
if err := p.consumeToken(":"); err != nil {
return err
}
if err := p.readAny(val, props.mvalprop); err != nil {
return err
}
if err := p.consumeToken(terminator); err != nil {
return err
}
dst.SetMapIndex(key, val)
continue
}
// Check that it's not already set if it's not a repeated field.
if !props.Repeated && fieldSet[name] {
return p.errorf("non-repeated field %q was repeated", name)
}
if err := p.checkForColon(props, st.Field(fi).Type); err != nil {
return err return err
} }
// Parse into the field. // Construct the map if it doesn't already exist.
fieldSet[name] = true if dst.IsNil() {
if err := p.readAny(dst, props); err != nil { dst.Set(reflect.MakeMap(dst.Type()))
if _, ok := err.(*RequiredNotSetError); !ok {
return err
}
reqFieldErr = err
} else if props.Required {
reqCount--
} }
key := reflect.New(dst.Type().Key()).Elem()
val := reflect.New(dst.Type().Elem()).Elem()
// The map entry should be this sequence of tokens:
// < key : KEY value : VALUE >
// Technically the "key" and "value" could come in any order,
// but in practice they won't.
tok := p.next()
var terminator string
switch tok.value {
case "<":
terminator = ">"
case "{":
terminator = "}"
default:
return p.errorf("expected '{' or '<', found %q", tok.value)
}
if err := p.consumeToken("key"); err != nil {
return err
}
if err := p.consumeToken(":"); err != nil {
return err
}
if err := p.readAny(key, props.mkeyprop); err != nil {
return err
}
if err := p.consumeOptionalSeparator(); err != nil {
return err
}
if err := p.consumeToken("value"); err != nil {
return err
}
if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil {
return err
}
if err := p.readAny(val, props.mvalprop); err != nil {
return err
}
if err := p.consumeOptionalSeparator(); err != nil {
return err
}
if err := p.consumeToken(terminator); err != nil {
return err
}
dst.SetMapIndex(key, val)
continue
} }
// For backward compatibility, permit a semicolon or comma after a field. // Check that it's not already set if it's not a repeated field.
tok = p.next() if !props.Repeated && fieldSet[name] {
if tok.err != nil { return p.errorf("non-repeated field %q was repeated", name)
return tok.err
} }
if tok.value != ";" && tok.value != "," {
p.back() if err := p.checkForColon(props, dst.Type()); err != nil {
return err
} }
// Parse into the field.
fieldSet[name] = true
if err := p.readAny(dst, props); err != nil {
if _, ok := err.(*RequiredNotSetError); !ok {
return err
}
reqFieldErr = err
} else if props.Required {
reqCount--
}
if err := p.consumeOptionalSeparator(); err != nil {
return err
}
} }
if reqCount > 0 { if reqCount > 0 {
@@ -621,6 +643,19 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
return reqFieldErr return reqFieldErr
} }
// consumeOptionalSeparator consumes an optional semicolon or comma.
// It is used in readStruct to provide backward compatibility.
func (p *textParser) consumeOptionalSeparator() error {
tok := p.next()
if tok.err != nil {
return tok.err
}
if tok.value != ";" && tok.value != "," {
p.back()
}
return nil
}
func (p *textParser) readAny(v reflect.Value, props *Properties) error { func (p *textParser) readAny(v reflect.Value, props *Properties) error {
tok := p.next() tok := p.next()
if tok.err != nil { if tok.err != nil {
@@ -645,18 +680,32 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) error {
fv.Set(reflect.ValueOf(bytes)) fv.Set(reflect.ValueOf(bytes))
return nil return nil
} }
// Repeated field. May already exist. // Repeated field.
flen := fv.Len() if tok.value == "[" {
if flen == fv.Cap() { // Repeated field with list notation, like [1,2,3].
nav := reflect.MakeSlice(at, flen, 2*flen+1) for {
reflect.Copy(nav, fv) fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
fv.Set(nav) err := p.readAny(fv.Index(fv.Len()-1), props)
if err != nil {
return err
}
tok := p.next()
if tok.err != nil {
return tok.err
}
if tok.value == "]" {
break
}
if tok.value != "," {
return p.errorf("Expected ']' or ',' found %q", tok.value)
}
}
return nil
} }
fv.SetLen(flen + 1) // One value of the repeated field.
// Read one.
p.back() p.back()
return p.readAny(fv.Index(flen), props) fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
return p.readAny(fv.Index(fv.Len()-1), props)
case reflect.Bool: case reflect.Bool:
// Either "true", "false", 1 or 0. // Either "true", "false", 1 or 0.
switch tok.value { switch tok.value {

View File

@@ -467,7 +467,7 @@ func (self *version2_1) Version() string {
} }
func (self *version2_1) SupportedRequestTypes() []string { func (self *version2_1) SupportedRequestTypes() []string {
return self.baseVersion.SupportedRequestTypes() return append([]string{machineStatsApi}, self.baseVersion.SupportedRequestTypes()...)
} }
func (self *version2_1) HandleRequest(requestType string, request []string, m manager.Manager, w http.ResponseWriter, r *http.Request) error { func (self *version2_1) HandleRequest(requestType string, request []string, m manager.Manager, w http.ResponseWriter, r *http.Request) error {
@@ -492,9 +492,16 @@ func (self *version2_1) HandleRequest(requestType string, request []string, m ma
if err != nil { if err != nil {
return err return err
} }
contStats := make(map[string][]*v2.ContainerStats, len(conts)) contStats := make(map[string]v2.ContainerInfo, len(conts))
for name, cont := range conts { for name, cont := range conts {
contStats[name] = v2.ContainerStatsFromV1(&cont.Spec, cont.Stats) if name == "/" {
// Root cgroup stats should be exposed as machine stats
continue
}
contStats[name] = v2.ContainerInfo{
Spec: v2.ContainerSpecFromV1(&cont.Spec, cont.Aliases, cont.Namespace),
Stats: v2.ContainerStatsFromV1(&cont.Spec, cont.Stats),
}
} }
return writeResult(contStats, w) return writeResult(contStats, w)
default: default:

View File

@@ -40,6 +40,7 @@ var ArgDockerEndpoint = flag.String("docker", "unix:///var/run/docker.sock", "do
var DockerNamespace = "docker" var DockerNamespace = "docker"
// Basepath to all container specific information that libcontainer stores. // Basepath to all container specific information that libcontainer stores.
// TODO: Deprecate this flag
var dockerRootDir = flag.String("docker_root", "/var/lib/docker", "Absolute path to the Docker state root directory (default: /var/lib/docker)") var dockerRootDir = flag.String("docker_root", "/var/lib/docker", "Absolute path to the Docker state root directory (default: /var/lib/docker)")
var dockerRunDir = flag.String("docker_run", "/var/run/docker", "Absolute path to the Docker run directory (default: /var/run/docker)") var dockerRunDir = flag.String("docker_run", "/var/run/docker", "Absolute path to the Docker run directory (default: /var/run/docker)")
@@ -61,6 +62,10 @@ func DockerStateDir() string {
var useSystemd = false var useSystemd = false
var check = sync.Once{} var check = sync.Once{}
const (
dockerRootDirKey = "Root Dir"
)
func UseSystemd() bool { func UseSystemd() bool {
check.Do(func() { check.Do(func() {
if *noSystemd { if *noSystemd {
@@ -101,6 +106,7 @@ type dockerFactory struct {
machineInfoFactory info.MachineInfoFactory machineInfoFactory info.MachineInfoFactory
storageDriver storageDriver storageDriver storageDriver
storageDir string
client *docker.Client client *docker.Client
@@ -109,6 +115,8 @@ type dockerFactory struct {
// Information about mounted filesystems. // Information about mounted filesystems.
fsInfo fs.FsInfo fsInfo fs.FsInfo
dockerVersion []int
} }
func (self *dockerFactory) String() string { func (self *dockerFactory) String() string {
@@ -129,9 +137,11 @@ func (self *dockerFactory) NewContainerHandler(name string, inHostNamespace bool
self.machineInfoFactory, self.machineInfoFactory,
self.fsInfo, self.fsInfo,
self.storageDriver, self.storageDriver,
self.storageDir,
&self.cgroupSubsystems, &self.cgroupSubsystems,
inHostNamespace, inHostNamespace,
metadataEnvs, metadataEnvs,
self.dockerVersion,
) )
return return
} }
@@ -214,36 +224,45 @@ func Register(factory info.MachineInfoFactory, fsInfo fs.FsInfo) error {
if err != nil { if err != nil {
return fmt.Errorf("unable to communicate with docker daemon: %v", err) return fmt.Errorf("unable to communicate with docker daemon: %v", err)
} }
var dockerVersion []int
if version, err := client.Version(); err != nil { if version, err := client.Version(); err != nil {
return fmt.Errorf("unable to communicate with docker daemon: %v", err) return fmt.Errorf("unable to communicate with docker daemon: %v", err)
} else { } else {
expected_version := []int{1, 0, 0} expected_version := []int{1, 0, 0}
version_string := version.Get("Version") version_string := version.Get("Version")
version, err := parseDockerVersion(version_string) dockerVersion, err = parseDockerVersion(version_string)
if err != nil { if err != nil {
return fmt.Errorf("couldn't parse docker version: %v", err) return fmt.Errorf("couldn't parse docker version: %v", err)
} }
for index, number := range version { for index, number := range dockerVersion {
if number > expected_version[index] { if number > expected_version[index] {
break break
} else if number < expected_version[index] { } else if number < expected_version[index] {
return fmt.Errorf("cAdvisor requires docker version %v or above but we have found version %v reported as \"%v\"", expected_version, version, version_string) return fmt.Errorf("cAdvisor requires docker version %v or above but we have found version %v reported as \"%v\"", expected_version, dockerVersion, version_string)
} }
} }
} }
// Check that the libcontainer execdriver is used. information, err := client.Info()
information, err := DockerInfo()
if err != nil { if err != nil {
return fmt.Errorf("failed to detect Docker info: %v", err) return fmt.Errorf("failed to detect Docker info: %v", err)
} }
execDriver, ok := information["ExecutionDriver"]
if !ok || !strings.HasPrefix(execDriver, "native") { // Check that the libcontainer execdriver is used.
execDriver := information.Get("ExecutionDriver")
if !strings.HasPrefix(execDriver, "native") {
return fmt.Errorf("docker found, but not using native exec driver") return fmt.Errorf("docker found, but not using native exec driver")
} }
sd, _ := information["Driver"] sd := information.Get("Driver")
if sd == "" {
return fmt.Errorf("failed to find docker storage driver")
}
storageDir := information.Get("DockerRootDir")
if storageDir == "" {
storageDir = *dockerRootDir
}
cgroupSubsystems, err := libcontainer.GetCgroupSubsystems() cgroupSubsystems, err := libcontainer.GetCgroupSubsystems()
if err != nil { if err != nil {
return fmt.Errorf("failed to get cgroup subsystems: %v", err) return fmt.Errorf("failed to get cgroup subsystems: %v", err)
@@ -251,11 +270,13 @@ func Register(factory info.MachineInfoFactory, fsInfo fs.FsInfo) error {
glog.Infof("Registering Docker factory") glog.Infof("Registering Docker factory")
f := &dockerFactory{ f := &dockerFactory{
machineInfoFactory: factory,
client: client,
storageDriver: storageDriver(sd),
cgroupSubsystems: cgroupSubsystems, cgroupSubsystems: cgroupSubsystems,
client: client,
dockerVersion: dockerVersion,
fsInfo: fsInfo, fsInfo: fsInfo,
machineInfoFactory: factory,
storageDriver: storageDriver(sd),
storageDir: storageDir,
} }
container.RegisterContainerHandlerFactory(f) container.RegisterContainerHandlerFactory(f)
return nil return nil

View File

@@ -17,6 +17,7 @@ package docker
import ( import (
"fmt" "fmt"
"io/ioutil"
"math" "math"
"path" "path"
"strings" "strings"
@@ -35,14 +36,10 @@ import (
) )
const ( const (
// Path to aufs dir where all the files exist. // The read write layers exist here.
// aufs/layers is ignored here since it does not hold a lot of data. aufsRWLayer = "diff"
// aufs/mnt contains the mount points used to compose the rootfs. Hence it is also ignored.
pathToAufsDir = "aufs/diff"
// Path to the directory where docker stores log files if the json logging driver is enabled. // Path to the directory where docker stores log files if the json logging driver is enabled.
pathToContainersDir = "containers" pathToContainersDir = "containers"
// Path to the overlayfs storage driver directory.
pathToOverlayDir = "overlay"
) )
type dockerContainerHandler struct { type dockerContainerHandler struct {
@@ -86,15 +83,34 @@ type dockerContainerHandler struct {
fsHandler fsHandler fsHandler fsHandler
} }
func getRwLayerID(containerID, storageDir string, sd storageDriver, dockerVersion []int) (string, error) {
const (
// Docker version >=1.10.0 have a randomized ID for the root fs of a container.
randomizedRWLayerMinorVersion = 10
rwLayerIDFile = "mount-id"
)
if (dockerVersion[0] <= 1) && (dockerVersion[1] < randomizedRWLayerMinorVersion) {
return containerID, nil
}
bytes, err := ioutil.ReadFile(path.Join(storageDir, "image", string(sd), "layerdb", "mounts", containerID, rwLayerIDFile))
if err != nil {
return "", fmt.Errorf("failed to identify the read-write layer ID for container %q. - %v", containerID, err)
}
return string(bytes), err
}
func newDockerContainerHandler( func newDockerContainerHandler(
client *docker.Client, client *docker.Client,
name string, name string,
machineInfoFactory info.MachineInfoFactory, machineInfoFactory info.MachineInfoFactory,
fsInfo fs.FsInfo, fsInfo fs.FsInfo,
storageDriver storageDriver, storageDriver storageDriver,
storageDir string,
cgroupSubsystems *containerlibcontainer.CgroupSubsystems, cgroupSubsystems *containerlibcontainer.CgroupSubsystems,
inHostNamespace bool, inHostNamespace bool,
metadataEnvs []string, metadataEnvs []string,
dockerVersion []int,
) (container.ContainerHandler, error) { ) (container.ContainerHandler, error) {
// Create the cgroup paths. // Create the cgroup paths.
cgroupPaths := make(map[string]string, len(cgroupSubsystems.MountPoints)) cgroupPaths := make(map[string]string, len(cgroupSubsystems.MountPoints))
@@ -118,13 +134,18 @@ func newDockerContainerHandler(
id := ContainerNameToDockerId(name) id := ContainerNameToDockerId(name)
// Add the Containers dir where the log files are stored. // Add the Containers dir where the log files are stored.
otherStorageDir := path.Join(*dockerRootDir, pathToContainersDir, id) otherStorageDir := path.Join(storageDir, pathToContainersDir, id)
rwLayerID, err := getRwLayerID(id, storageDir, storageDriver, dockerVersion)
if err != nil {
return nil, err
}
var rootfsStorageDir string var rootfsStorageDir string
switch storageDriver { switch storageDriver {
case aufsStorageDriver: case aufsStorageDriver:
rootfsStorageDir = path.Join(*dockerRootDir, pathToAufsDir, id) rootfsStorageDir = path.Join(storageDir, string(aufsStorageDriver), aufsRWLayer, rwLayerID)
case overlayStorageDriver: case overlayStorageDriver:
rootfsStorageDir = path.Join(*dockerRootDir, pathToOverlayDir, id) rootfsStorageDir = path.Join(storageDir, string(overlayStorageDriver), rwLayerID)
} }
handler := &dockerContainerHandler{ handler := &dockerContainerHandler{
@@ -180,9 +201,11 @@ func (self *dockerContainerHandler) Cleanup() {
func (self *dockerContainerHandler) ContainerReference() (info.ContainerReference, error) { func (self *dockerContainerHandler) ContainerReference() (info.ContainerReference, error) {
return info.ContainerReference{ return info.ContainerReference{
Id: self.id,
Name: self.name, Name: self.name,
Aliases: self.aliases, Aliases: self.aliases,
Namespace: DockerNamespace, Namespace: DockerNamespace,
Labels: self.labels,
}, nil }, nil
} }

View File

@@ -70,6 +70,9 @@ type ContainerSpec struct {
// Container reference contains enough information to uniquely identify a container // Container reference contains enough information to uniquely identify a container
type ContainerReference struct { type ContainerReference struct {
// The container id
Id string `json:"id,omitempty"`
// The absolute name of the container. This is unique on the machine. // The absolute name of the container. This is unique on the machine.
Name string `json:"name"` Name string `json:"name"`
@@ -80,6 +83,8 @@ type ContainerReference struct {
// Namespace under which the aliases of a container are unique. // Namespace under which the aliases of a container are unique.
// An example of a namespace is "docker" for Docker containers. // An example of a namespace is "docker" for Docker containers.
Namespace string `json:"namespace,omitempty"` Namespace string `json:"namespace,omitempty"`
Labels map[string]string `json:"labels,omitempty"`
} }
// Sorts by container name. // Sorts by container name.

View File

@@ -123,7 +123,7 @@ func ContainerStatsFromV1(spec *v1.ContainerSpec, stats []*v1.ContainerStats) []
} }
} else if len(val.Filesystem) > 1 { } else if len(val.Filesystem) > 1 {
// Cannot handle multiple devices per container. // Cannot handle multiple devices per container.
glog.Errorf("failed to handle multiple devices for container. Skipping Filesystem stats") glog.V(2).Infof("failed to handle multiple devices for container. Skipping Filesystem stats")
} }
} }
if spec.HasDiskIo { if spec.HasDiskIo {

View File

@@ -0,0 +1,114 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package kafka
import (
"encoding/json"
"flag"
"os"
"strings"
"time"
info "github.com/google/cadvisor/info/v1"
"github.com/google/cadvisor/storage"
"github.com/google/cadvisor/utils/container"
kafka "github.com/Shopify/sarama"
"github.com/golang/glog"
)
func init() {
storage.RegisterStorageDriver("kafka", new)
}
var (
brokers = flag.String("storage_driver_kafka_broker_list", "localhost:9092", "kafka broker(s) csv")
topic = flag.String("storage_driver_kafka_topic", "stats", "kafka topic")
)
type kafkaStorage struct {
producer kafka.AsyncProducer
topic string
machineName string
}
type detailSpec struct {
Timestamp time.Time `json:"timestamp"`
MachineName string `json:"machine_name,omitempty"`
ContainerName string `json:"container_Name,omitempty"`
ContainerID string `json:"container_Id,omitempty"`
ContainerLabels map[string]string `json:"container_labels,omitempty"`
ContainerStats *info.ContainerStats `json:"container_stats,omitempty"`
}
func (driver *kafkaStorage) infoToDetailSpec(ref info.ContainerReference, stats *info.ContainerStats) *detailSpec {
timestamp := time.Now()
containerID := ref.Id
containerLabels := ref.Labels
containerName := container.GetPreferredName(ref)
detail := &detailSpec{
Timestamp: timestamp,
MachineName: driver.machineName,
ContainerName: containerName,
ContainerID: containerID,
ContainerLabels: containerLabels,
ContainerStats: stats,
}
return detail
}
func (driver *kafkaStorage) AddStats(ref info.ContainerReference, stats *info.ContainerStats) error {
detail := driver.infoToDetailSpec(ref, stats)
b, err := json.Marshal(detail)
driver.producer.Input() <- &kafka.ProducerMessage{
Topic: driver.topic,
Value: kafka.StringEncoder(b),
}
return err
}
func (self *kafkaStorage) Close() error {
return self.producer.Close()
}
func new() (storage.StorageDriver, error) {
machineName, err := os.Hostname()
if err != nil {
return nil, err
}
return newStorage(machineName)
}
func newStorage(machineName string) (storage.StorageDriver, error) {
config := kafka.NewConfig()
config.Producer.RequiredAcks = kafka.WaitForAll
brokerList := strings.Split(*brokers, ",")
glog.V(4).Infof("Kafka brokers:%q", brokers)
producer, err := kafka.NewAsyncProducer(brokerList, config)
if err != nil {
return nil, err
}
ret := &kafkaStorage{
producer: producer,
topic: *topic,
machineName: machineName,
}
return ret, nil
}

View File

@@ -0,0 +1,31 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package container
import (
info "github.com/google/cadvisor/info/v1"
)
// Returns the alias a container is known by within a certain namespace,
// if available. Otherwise returns the absolute name of the container.
func GetPreferredName(ref info.ContainerReference) string {
var containerName string
if len(ref.Aliases) > 0 {
containerName = ref.Aliases[0]
} else {
containerName = ref.Name
}
return containerName
}

View File

@@ -1 +1 @@
0.20.5 0.21.1

View File

@@ -2,13 +2,14 @@ language: go
go: go:
- 1.3 - 1.3
- 1.4 - 1.4
- 1.5
- tip - tip
install: install:
- go get -v ./... - go get -v -t ./...
- go get golang.org/x/tools/cmd/cover - go get golang.org/x/tools/cmd/cover
- go get github.com/onsi/gomega - go get github.com/onsi/gomega
- go install github.com/onsi/ginkgo/ginkgo - go install github.com/onsi/ginkgo/ginkgo
- export PATH=$PATH:$HOME/gopath/bin - export PATH=$PATH:$HOME/gopath/bin
script: $HOME/gopath/bin/ginkgo -r --randomizeAllSpecs --failOnPending --randomizeSuites --race script: $HOME/gopath/bin/ginkgo -r --randomizeAllSpecs --randomizeSuites --race --trace

View File

@@ -3,6 +3,7 @@
Improvements: Improvements:
- `Skip(message)` can be used to skip the current test. - `Skip(message)` can be used to skip the current test.
- Added `extensions/table` - a Ginkgo DSL for [Table Driven Tests](http://onsi.github.io/ginkgo/#table-driven-tests)
Bug Fixes: Bug Fixes:

View File

@@ -79,7 +79,7 @@ func Flags(flagSet *flag.FlagSet, prefix string, includeParallelFlags bool) {
} }
flagSet.BoolVar(&(DefaultReporterConfig.NoColor), prefix+"noColor", false, "If set, suppress color output in default reporter.") flagSet.BoolVar(&(DefaultReporterConfig.NoColor), prefix+"noColor", false, "If set, suppress color output in default reporter.")
flagSet.Float64Var(&(DefaultReporterConfig.SlowSpecThreshold), prefix+"slowSpecThreshold", 5.0, "(in seconds) Specs that take longer to run than this threshold are flagged as slow by the default reporter (default: 5 seconds).") flagSet.Float64Var(&(DefaultReporterConfig.SlowSpecThreshold), prefix+"slowSpecThreshold", 5.0, "(in seconds) Specs that take longer to run than this threshold are flagged as slow by the default reporter.")
flagSet.BoolVar(&(DefaultReporterConfig.NoisyPendings), prefix+"noisyPendings", true, "If set, default reporter will shout about pending tests.") flagSet.BoolVar(&(DefaultReporterConfig.NoisyPendings), prefix+"noisyPendings", true, "If set, default reporter will shout about pending tests.")
flagSet.BoolVar(&(DefaultReporterConfig.Verbose), prefix+"v", false, "If set, default reporter print out all specs as they begin.") flagSet.BoolVar(&(DefaultReporterConfig.Verbose), prefix+"v", false, "If set, default reporter print out all specs as they begin.")
flagSet.BoolVar(&(DefaultReporterConfig.Succinct), prefix+"succinct", false, "If set, default reporter prints out a very succinct report") flagSet.BoolVar(&(DefaultReporterConfig.Succinct), prefix+"succinct", false, "If set, default reporter prints out a very succinct report")

View File

@@ -0,0 +1,98 @@
/*
Table provides a simple DSL for Ginkgo-native Table-Driven Tests
The godoc documentation describes Table's API. More comprehensive documentation (with examples!) is available at http://onsi.github.io/ginkgo#table-driven-tests
*/
package table
import (
"fmt"
"reflect"
"github.com/onsi/ginkgo"
)
/*
DescribeTable describes a table-driven test.
For example:
DescribeTable("a simple table",
func(x int, y int, expected bool) {
Ω(x > y).Should(Equal(expected))
},
Entry("x > y", 1, 0, true),
Entry("x == y", 0, 0, false),
Entry("x < y", 0, 1, false),
)
The first argument to `DescribeTable` is a string description.
The second argument is a function that will be run for each table entry. Your assertions go here - the function is equivalent to a Ginkgo It.
The subsequent arguments must be of type `TableEntry`. We recommend using the `Entry` convenience constructors.
The `Entry` constructor takes a string description followed by an arbitrary set of parameters. These parameters are passed into your function.
Under the hood, `DescribeTable` simply generates a new Ginkgo `Describe`. Each `Entry` is turned into an `It` within the `Describe`.
It's important to understand that the `Describe`s and `It`s are generated at evaluation time (i.e. when Ginkgo constructs the tree of tests and before the tests run).
Individual Entries can be focused (with FEntry) or marked pending (with PEntry or XEntry). In addition, the entire table can be focused or marked pending with FDescribeTable and PDescribeTable/XDescribeTable.
*/
func DescribeTable(description string, itBody interface{}, entries ...TableEntry) bool {
describeTable(description, itBody, entries, false, false)
return true
}
/*
You can focus a table with `FDescribeTable`. This is equivalent to `FDescribe`.
*/
func FDescribeTable(description string, itBody interface{}, entries ...TableEntry) bool {
describeTable(description, itBody, entries, false, true)
return true
}
/*
You can mark a table as pending with `PDescribeTable`. This is equivalent to `PDescribe`.
*/
func PDescribeTable(description string, itBody interface{}, entries ...TableEntry) bool {
describeTable(description, itBody, entries, true, false)
return true
}
/*
You can mark a table as pending with `XDescribeTable`. This is equivalent to `XDescribe`.
*/
func XDescribeTable(description string, itBody interface{}, entries ...TableEntry) bool {
describeTable(description, itBody, entries, true, false)
return true
}
func describeTable(description string, itBody interface{}, entries []TableEntry, pending bool, focused bool) {
itBodyValue := reflect.ValueOf(itBody)
if itBodyValue.Kind() != reflect.Func {
panic(fmt.Sprintf("DescribeTable expects a function, got %#v", itBody))
}
if pending {
ginkgo.PDescribe(description, func() {
for _, entry := range entries {
entry.generateIt(itBodyValue)
}
})
} else if focused {
ginkgo.FDescribe(description, func() {
for _, entry := range entries {
entry.generateIt(itBodyValue)
}
})
} else {
ginkgo.Describe(description, func() {
for _, entry := range entries {
entry.generateIt(itBodyValue)
}
})
}
}

View File

@@ -0,0 +1,81 @@
package table
import (
"reflect"
"github.com/onsi/ginkgo"
)
/*
TableEntry represents an entry in a table test. You generally use the `Entry` constructor.
*/
type TableEntry struct {
Description string
Parameters []interface{}
Pending bool
Focused bool
}
func (t TableEntry) generateIt(itBody reflect.Value) {
if t.Pending {
ginkgo.PIt(t.Description)
return
}
values := []reflect.Value{}
for i, param := range t.Parameters {
var value reflect.Value
if param == nil {
inType := itBody.Type().In(i)
value = reflect.Zero(inType)
} else {
value = reflect.ValueOf(param)
}
values = append(values, value)
}
body := func() {
itBody.Call(values)
}
if t.Focused {
ginkgo.FIt(t.Description, body)
} else {
ginkgo.It(t.Description, body)
}
}
/*
Entry constructs a TableEntry.
The first argument is a required description (this becomes the content of the generated Ginkgo `It`).
Subsequent parameters are saved off and sent to the callback passed in to `DescribeTable`.
Each Entry ends up generating an individual Ginkgo It.
*/
func Entry(description string, parameters ...interface{}) TableEntry {
return TableEntry{description, parameters, false, false}
}
/*
You can focus a particular entry with FEntry. This is equivalent to FIt.
*/
func FEntry(description string, parameters ...interface{}) TableEntry {
return TableEntry{description, parameters, false, true}
}
/*
You can mark a particular entry as pending with PEntry. This is equivalent to PIt.
*/
func PEntry(description string, parameters ...interface{}) TableEntry {
return TableEntry{description, parameters, true, false}
}
/*
You can mark a particular entry as pending with XEntry. This is equivalent to XIt.
*/
func XEntry(description string, parameters ...interface{}) TableEntry {
return TableEntry{description, parameters, true, false}
}

View File

@@ -63,6 +63,8 @@ func findSuiteFile() (string, os.FileMode) {
if err != nil { if err != nil {
complainAndQuit("Could not find suite file for nodot: " + err.Error()) complainAndQuit("Could not find suite file for nodot: " + err.Error())
} }
defer f.Close()
if re.MatchReader(bufio.NewReader(f)) { if re.MatchReader(bufio.NewReader(f)) {
return path, file.Mode() return path, file.Mode()
} }

View File

@@ -101,13 +101,18 @@ func (t *TestRunner) CompileTo(path string) error {
} }
if fileExists(path) == false { if fileExists(path) == false {
compiledFile := filepath.Join(t.Suite.Path, t.Suite. PackageName+".test") compiledFile := filepath.Join(t.Suite.Path, t.Suite.PackageName+".test")
if fileExists(compiledFile) { if fileExists(compiledFile) {
// seems like we are on an old go version that does not support the -o flag on go test // seems like we are on an old go version that does not support the -o flag on go test
// move the compiled test file to the desired location by hand // move the compiled test file to the desired location by hand
err = os.Rename(compiledFile, path) err = os.Rename(compiledFile, path)
if err != nil { if err != nil {
return fmt.Errorf("Failed to move compiled file: %s", err) // We cannot move the file, perhaps because the source and destination
// are on different partitions. We can copy the file, however.
err = copyFile(compiledFile, path)
if err != nil {
return fmt.Errorf("Failed to copy compiled file: %s", err)
}
} }
} else { } else {
return fmt.Errorf("Failed to compile %s: output file %q could not be found", t.Suite.PackageName, path) return fmt.Errorf("Failed to compile %s: output file %q could not be found", t.Suite.PackageName, path)
@@ -124,6 +129,49 @@ func fileExists(path string) bool {
return err == nil || os.IsNotExist(err) == false return err == nil || os.IsNotExist(err) == false
} }
// copyFile copies the contents of the file named src to the file named
// by dst. The file will be created if it does not already exist. If the
// destination file exists, all it's contents will be replaced by the contents
// of the source file.
func copyFile(src, dst string) error {
srcInfo, err := os.Stat(src)
if err != nil {
return err
}
mode := srcInfo.Mode()
in, err := os.Open(src)
if err != nil {
return err
}
defer in.Close()
out, err := os.Create(dst)
if err != nil {
return err
}
defer func() {
closeErr := out.Close()
if err == nil {
err = closeErr
}
}()
_, err = io.Copy(out, in)
if err != nil {
return err
}
err = out.Sync()
if err != nil {
return err
}
return out.Chmod(mode)
}
/* /*
go test -c -i spits package.test out into the cwd. there's no way to change this. go test -c -i spits package.test out into the cwd. there's no way to change this.

Some files were not shown because too many files have changed in this diff Show More