diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index 55d29ed532c..7732306127c 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -300,6 +300,11 @@ "ImportPath": "github.com/coreos/pkg/timeutil", "Rev": "fa94270d4bac0d8ae5dc6b71894e251aada93f74" }, + { + "ImportPath": "github.com/coreos/rkt/api/v1alpha", + "Comment": "v0.11.0-22-g71d7331", + "Rev": "71d7331e46f20aaa41bed05e861bfeca92249968" + }, { "ImportPath": "github.com/cpuguy83/go-md2man/md2man", "Comment": "v1.0.4", @@ -747,6 +752,14 @@ "ImportPath": "golang.org/x/net/html", "Rev": "c2528b2dd8352441850638a8bb678c2ad056fd3e" }, + { + "ImportPath": "golang.org/x/net/internal/timeseries", + "Rev": "c2528b2dd8352441850638a8bb678c2ad056fd3e" + }, + { + "ImportPath": "golang.org/x/net/trace", + "Rev": "c2528b2dd8352441850638a8bb678c2ad056fd3e" + }, { "ImportPath": "golang.org/x/net/websocket", "Rev": "c2528b2dd8352441850638a8bb678c2ad056fd3e" @@ -789,7 +802,7 @@ }, { "ImportPath": "google.golang.org/grpc", - "Rev": "f5ebd86be717593ab029545492c93ddf8914832b" + "Rev": "4bd040ce23a624ff9a1d07b0e729ee189bddd51c" }, { "ImportPath": "gopkg.in/natefinch/lumberjack.v2", diff --git a/Godeps/_workspace/src/github.com/coreos/rkt/LICENSE b/Godeps/_workspace/src/github.com/coreos/rkt/LICENSE new file mode 100644 index 00000000000..e06d2081865 --- /dev/null +++ b/Godeps/_workspace/src/github.com/coreos/rkt/LICENSE @@ -0,0 +1,202 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + diff --git a/Godeps/_workspace/src/github.com/coreos/rkt/api/v1alpha/README.md b/Godeps/_workspace/src/github.com/coreos/rkt/api/v1alpha/README.md new file mode 100644 index 00000000000..7d4eada04a2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/coreos/rkt/api/v1alpha/README.md @@ -0,0 +1,12 @@ +# WARNING + +The API defined here is proposed, experimental, and (for now) subject to change at any time. 
+ +**Do not use it.** + +If you think you want to use it, or for any other queries, contact or file an [issue](https://github.com/coreos/rkt/issues/new) + +For more information, see: +- #1208 +- #1359 +- #1468 diff --git a/Godeps/_workspace/src/github.com/coreos/rkt/api/v1alpha/api.pb.go b/Godeps/_workspace/src/github.com/coreos/rkt/api/v1alpha/api.pb.go new file mode 100644 index 00000000000..05f9ffea1dc --- /dev/null +++ b/Godeps/_workspace/src/github.com/coreos/rkt/api/v1alpha/api.pb.go @@ -0,0 +1,991 @@ +// Code generated by protoc-gen-go. +// source: api.proto +// DO NOT EDIT! + +/* +Package v1alpha is a generated protocol buffer package. + +It is generated from these files: + api.proto + +It has these top-level messages: + ImageFormat + Image + Network + App + Pod + KeyValue + PodFilter + ImageFilter + Info + Event + EventFilter + GetInfoRequest + GetInfoResponse + ListPodsRequest + ListPodsResponse + InspectPodRequest + InspectPodResponse + ListImagesRequest + ListImagesResponse + InspectImageRequest + InspectImageResponse + ListenEventsRequest + ListenEventsResponse + GetLogsRequest + GetLogsResponse +*/ +package v1alpha + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// ImageType defines the supported image type. 
+type ImageType int32 + +const ( + ImageType_IMAGE_TYPE_UNDEFINED ImageType = 0 + ImageType_IMAGE_TYPE_APPC ImageType = 1 + ImageType_IMAGE_TYPE_DOCKER ImageType = 2 + ImageType_IMAGE_TYPE_OCI ImageType = 3 +) + +var ImageType_name = map[int32]string{ + 0: "IMAGE_TYPE_UNDEFINED", + 1: "IMAGE_TYPE_APPC", + 2: "IMAGE_TYPE_DOCKER", + 3: "IMAGE_TYPE_OCI", +} +var ImageType_value = map[string]int32{ + "IMAGE_TYPE_UNDEFINED": 0, + "IMAGE_TYPE_APPC": 1, + "IMAGE_TYPE_DOCKER": 2, + "IMAGE_TYPE_OCI": 3, +} + +func (x ImageType) String() string { + return proto.EnumName(ImageType_name, int32(x)) +} + +// AppState defines the possible states of the app. +type AppState int32 + +const ( + AppState_APP_STATE_UNDEFINED AppState = 0 + AppState_APP_STATE_RUNNING AppState = 1 + AppState_APP_STATE_EXITED AppState = 2 +) + +var AppState_name = map[int32]string{ + 0: "APP_STATE_UNDEFINED", + 1: "APP_STATE_RUNNING", + 2: "APP_STATE_EXITED", +} +var AppState_value = map[string]int32{ + "APP_STATE_UNDEFINED": 0, + "APP_STATE_RUNNING": 1, + "APP_STATE_EXITED": 2, +} + +func (x AppState) String() string { + return proto.EnumName(AppState_name, int32(x)) +} + +// PodState defines the possible states of the pod. +// See https://github.com/coreos/rkt/blob/master/Documentation/devel/pod-lifecycle.md for a detailed +// explanation of each state. +type PodState int32 + +const ( + PodState_POD_STATE_UNDEFINED PodState = 0 + // States before the pod is running. + PodState_POD_STATE_EMBRYO PodState = 1 + PodState_POD_STATE_PREPARING PodState = 2 + PodState_POD_STATE_PREPARED PodState = 3 + // State that indicates the pod is running. + PodState_POD_STATE_RUNNING PodState = 4 + // States that indicates the pod is exited, and will never run. 
+ PodState_POD_STATE_ABORTED_PREPARE PodState = 5 + PodState_POD_STATE_EXITED PodState = 6 + PodState_POD_STATE_DELETING PodState = 7 + PodState_POD_STATE_GARBAGE PodState = 8 +) + +var PodState_name = map[int32]string{ + 0: "POD_STATE_UNDEFINED", + 1: "POD_STATE_EMBRYO", + 2: "POD_STATE_PREPARING", + 3: "POD_STATE_PREPARED", + 4: "POD_STATE_RUNNING", + 5: "POD_STATE_ABORTED_PREPARE", + 6: "POD_STATE_EXITED", + 7: "POD_STATE_DELETING", + 8: "POD_STATE_GARBAGE", +} +var PodState_value = map[string]int32{ + "POD_STATE_UNDEFINED": 0, + "POD_STATE_EMBRYO": 1, + "POD_STATE_PREPARING": 2, + "POD_STATE_PREPARED": 3, + "POD_STATE_RUNNING": 4, + "POD_STATE_ABORTED_PREPARE": 5, + "POD_STATE_EXITED": 6, + "POD_STATE_DELETING": 7, + "POD_STATE_GARBAGE": 8, +} + +func (x PodState) String() string { + return proto.EnumName(PodState_name, int32(x)) +} + +// EventType defines the type of the events that will be received via ListenEvents(). +type EventType int32 + +const ( + EventType_EVENT_TYPE_UNDEFINED EventType = 0 + // Pod events. + EventType_EVENT_TYPE_POD_PREPARED EventType = 1 + EventType_EVENT_TYPE_POD_PREPARE_ABORTED EventType = 2 + EventType_EVENT_TYPE_POD_STARTED EventType = 3 + EventType_EVENT_TYPE_POD_EXITED EventType = 4 + EventType_EVENT_TYPE_POD_GARBAGE_COLLECTED EventType = 5 + // App events. + EventType_EVENT_TYPE_APP_STARTED EventType = 6 + EventType_EVENT_TYPE_APP_EXITED EventType = 7 + // Image events. 
+ EventType_EVENT_TYPE_IMAGE_IMPORTED EventType = 8 + EventType_EVENT_TYPE_IMAGE_REMOVED EventType = 9 +) + +var EventType_name = map[int32]string{ + 0: "EVENT_TYPE_UNDEFINED", + 1: "EVENT_TYPE_POD_PREPARED", + 2: "EVENT_TYPE_POD_PREPARE_ABORTED", + 3: "EVENT_TYPE_POD_STARTED", + 4: "EVENT_TYPE_POD_EXITED", + 5: "EVENT_TYPE_POD_GARBAGE_COLLECTED", + 6: "EVENT_TYPE_APP_STARTED", + 7: "EVENT_TYPE_APP_EXITED", + 8: "EVENT_TYPE_IMAGE_IMPORTED", + 9: "EVENT_TYPE_IMAGE_REMOVED", +} +var EventType_value = map[string]int32{ + "EVENT_TYPE_UNDEFINED": 0, + "EVENT_TYPE_POD_PREPARED": 1, + "EVENT_TYPE_POD_PREPARE_ABORTED": 2, + "EVENT_TYPE_POD_STARTED": 3, + "EVENT_TYPE_POD_EXITED": 4, + "EVENT_TYPE_POD_GARBAGE_COLLECTED": 5, + "EVENT_TYPE_APP_STARTED": 6, + "EVENT_TYPE_APP_EXITED": 7, + "EVENT_TYPE_IMAGE_IMPORTED": 8, + "EVENT_TYPE_IMAGE_REMOVED": 9, +} + +func (x EventType) String() string { + return proto.EnumName(EventType_name, int32(x)) +} + +// ImageFormat defines the format of the image. +type ImageFormat struct { + // Type of the image, required. + Type ImageType `protobuf:"varint,1,opt,name=type,enum=v1alpha.ImageType" json:"type,omitempty"` + // Version of the image format, required. + Version string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` +} + +func (m *ImageFormat) Reset() { *m = ImageFormat{} } +func (m *ImageFormat) String() string { return proto.CompactTextString(m) } +func (*ImageFormat) ProtoMessage() {} + +// Image describes the image's information. +type Image struct { + // Base format of the image, required. This indicates the original format + // for the image as nowadays all the image formats will be transformed to + // ACI. + BaseFormat *ImageFormat `protobuf:"bytes,1,opt,name=base_format" json:"base_format,omitempty"` + // ID of the image, a string that can be used to uniquely identify the image, + // e.g. sha512 hash of the ACIs, required. 
+ Id string `protobuf:"bytes,2,opt,name=id" json:"id,omitempty"` + // Name of the image in the image manifest, e.g. 'coreos.com/etcd', optional. + Name string `protobuf:"bytes,3,opt,name=name" json:"name,omitempty"` + // Version of the image, e.g. 'latest', '2.0.10', optional. + Version string `protobuf:"bytes,4,opt,name=version" json:"version,omitempty"` + // Timestamp of when the image is imported, it is the seconds since epoch, optional. + ImportTimestamp int64 `protobuf:"varint,5,opt,name=import_timestamp" json:"import_timestamp,omitempty"` + // JSON-encoded byte array that represents the image manifest, optional. + Manifest []byte `protobuf:"bytes,6,opt,name=manifest,proto3" json:"manifest,omitempty"` +} + +func (m *Image) Reset() { *m = Image{} } +func (m *Image) String() string { return proto.CompactTextString(m) } +func (*Image) ProtoMessage() {} + +func (m *Image) GetBaseFormat() *ImageFormat { + if m != nil { + return m.BaseFormat + } + return nil +} + +// Network describes the network information of a pod. +type Network struct { + // Name of the network that a pod belongs to, required. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Pod's IPv4 address within the network, optional if IPv6 address is given. + Ipv4 string `protobuf:"bytes,2,opt,name=ipv4" json:"ipv4,omitempty"` + // Pod's IPv6 address within the network, optional if IPv4 address is given. + Ipv6 string `protobuf:"bytes,3,opt,name=ipv6" json:"ipv6,omitempty"` +} + +func (m *Network) Reset() { *m = Network{} } +func (m *Network) String() string { return proto.CompactTextString(m) } +func (*Network) ProtoMessage() {} + +// App describes the information of an app that's running in a pod. +type App struct { + // Name of the app, required. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Image used by the app, required. However, this may only contain the image id + // if it is returned by ListPods(). 
+ Image *Image `protobuf:"bytes,2,opt,name=image" json:"image,omitempty"` + // State of the app. optional, non-empty only if it's returned by InspectPod(). + State AppState `protobuf:"varint,3,opt,name=state,enum=v1alpha.AppState" json:"state,omitempty"` + // Exit code of the app. optional, only valid if it's returned by InspectPod() and + // the app has already exited. + ExitCode int32 `protobuf:"zigzag32,4,opt,name=exit_code" json:"exit_code,omitempty"` +} + +func (m *App) Reset() { *m = App{} } +func (m *App) String() string { return proto.CompactTextString(m) } +func (*App) ProtoMessage() {} + +func (m *App) GetImage() *Image { + if m != nil { + return m.Image + } + return nil +} + +// Pod describes a pod's information. +type Pod struct { + // ID of the pod, in the form of a UUID, required. + Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` + // PID of the pod, optional, only valid if it's returned by InspectPod(). A negative value means the pod has exited. + Pid int32 `protobuf:"zigzag32,2,opt,name=pid" json:"pid,omitempty"` + // State of the pod, required. + State PodState `protobuf:"varint,3,opt,name=state,enum=v1alpha.PodState" json:"state,omitempty"` + // List of apps in the pod, required. + Apps []*App `protobuf:"bytes,4,rep,name=apps" json:"apps,omitempty"` + // Network information of the pod, optional, non-empty if the pod is running in private net. + // Note that a pod can be in multiple networks. + Networks []*Network `protobuf:"bytes,5,rep,name=networks" json:"networks,omitempty"` + // JSON-encoded byte array that represents the pod manifest of the pod, required. 
+ Manifest []byte `protobuf:"bytes,6,opt,name=manifest,proto3" json:"manifest,omitempty"` +} + +func (m *Pod) Reset() { *m = Pod{} } +func (m *Pod) String() string { return proto.CompactTextString(m) } +func (*Pod) ProtoMessage() {} + +func (m *Pod) GetApps() []*App { + if m != nil { + return m.Apps + } + return nil +} + +func (m *Pod) GetNetworks() []*Network { + if m != nil { + return m.Networks + } + return nil +} + +type KeyValue struct { + // Key part of the key-value pair. + Key string `protobuf:"bytes,1,opt" json:"Key,omitempty"` + // Value part of the key-value pair. + Value string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` +} + +func (m *KeyValue) Reset() { *m = KeyValue{} } +func (m *KeyValue) String() string { return proto.CompactTextString(m) } +func (*KeyValue) ProtoMessage() {} + +// PodFilter defines the condition that the returned pods need to satisfy in ListPods(). +// The conditions are combined by 'AND'. +type PodFilter struct { + // If not empty, the pods that have any of the ids will be returned. + Ids []string `protobuf:"bytes,1,rep,name=ids" json:"ids,omitempty"` + // If not empty, the pods that have any of the states will be returned. + States []PodState `protobuf:"varint,2,rep,name=states,enum=v1alpha.PodState" json:"states,omitempty"` + // If not empty, the pods that have any of the apps will be returned. + AppNames []string `protobuf:"bytes,3,rep,name=app_names" json:"app_names,omitempty"` + // If not empty, the pods that have any of the images(in the apps) will be returned + ImageIds []string `protobuf:"bytes,4,rep,name=image_ids" json:"image_ids,omitempty"` + // If not empty, the pods that are in any of the networks will be returned. + NetworkNames []string `protobuf:"bytes,5,rep,name=network_names" json:"network_names,omitempty"` + // If not empty, the pods that have any of the annotations will be returned. 
+ Annotations []*KeyValue `protobuf:"bytes,6,rep,name=annotations" json:"annotations,omitempty"` +} + +func (m *PodFilter) Reset() { *m = PodFilter{} } +func (m *PodFilter) String() string { return proto.CompactTextString(m) } +func (*PodFilter) ProtoMessage() {} + +func (m *PodFilter) GetAnnotations() []*KeyValue { + if m != nil { + return m.Annotations + } + return nil +} + +// ImageFilter defines the condition that the returned images need to satisfy in ListImages(). +// The conditions are combined by 'AND'. +type ImageFilter struct { + // If not empty, the images that have any of the ids will be returned. + Ids []string `protobuf:"bytes,1,rep,name=ids" json:"ids,omitempty"` + // if not empty, the images that have any of the prefixes in the name will be returned. + Prefixes []string `protobuf:"bytes,2,rep,name=prefixes" json:"prefixes,omitempty"` + // If not empty, the images that have any of the base names will be returned. + // For example, both 'coreos.com/etcd' and 'k8s.io/etcd' will be returned if 'etcd' is included, + // however 'k8s.io/etcd-backup' will not be returned. + BaseNames []string `protobuf:"bytes,3,rep,name=base_names" json:"base_names,omitempty"` + // If not empty, the images that have any of the keywords in the name will be returned. + // For example, both 'kubernetes-etcd', 'etcd:latest' will be returned if 'etcd' is included, + Keywords []string `protobuf:"bytes,4,rep,name=keywords" json:"keywords,omitempty"` + // If not empty, the images that have any of the labels will be returned. + Labels []*KeyValue `protobuf:"bytes,5,rep,name=labels" json:"labels,omitempty"` + // If set, the images that are imported after this timestamp will be returned. + ImportedAfter int64 `protobuf:"varint,6,opt,name=imported_after" json:"imported_after,omitempty"` + // If set, the images that are imported before this timestamp will be returned. 
+ ImportedBefore int64 `protobuf:"varint,7,opt,name=imported_before" json:"imported_before,omitempty"` + // If not empty, the images that have any of the annotations will be returned. + Annotations []*KeyValue `protobuf:"bytes,8,rep,name=annotations" json:"annotations,omitempty"` +} + +func (m *ImageFilter) Reset() { *m = ImageFilter{} } +func (m *ImageFilter) String() string { return proto.CompactTextString(m) } +func (*ImageFilter) ProtoMessage() {} + +func (m *ImageFilter) GetLabels() []*KeyValue { + if m != nil { + return m.Labels + } + return nil +} + +func (m *ImageFilter) GetAnnotations() []*KeyValue { + if m != nil { + return m.Annotations + } + return nil +} + +// Info describes the information of rkt on the machine. +type Info struct { + // Version of rkt, required, in the form of Semantic Versioning 2.0.0 (http://semver.org/). + RktVersion string `protobuf:"bytes,1,opt,name=rkt_version" json:"rkt_version,omitempty"` + // Version of appc, required, in the form of Semantic Versioning 2.0.0 (http://semver.org/). + AppcVersion string `protobuf:"bytes,2,opt,name=appc_version" json:"appc_version,omitempty"` + // Latest version of the api that's supported by the service, required, in the form of Semantic Versioning 2.0.0 (http://semver.org/). + ApiVersion string `protobuf:"bytes,3,opt,name=api_version" json:"api_version,omitempty"` +} + +func (m *Info) Reset() { *m = Info{} } +func (m *Info) String() string { return proto.CompactTextString(m) } +func (*Info) ProtoMessage() {} + +// Event describes the events that will be received via ListenEvents(). +type Event struct { + // Type of the event, required. + Type EventType `protobuf:"varint,1,opt,name=type,enum=v1alpha.EventType" json:"type,omitempty"` + // ID of the subject that causes the event, required. + // If the event is a pod or app event, the id is the pod's uuid. + // If the event is an image event, the id is the image's id. 
+ Id string `protobuf:"bytes,2,opt,name=id" json:"id,omitempty"` + // Name of the subject that causes the event, required. + // If the event is a pod event, the name is the pod's name. + // If the event is an app event, the name is the app's name. + // If the event is an image event, the name is the image's name. + From string `protobuf:"bytes,3,opt,name=from" json:"from,omitempty"` + // Timestamp of when the event happens, it is the seconds since epoch, required. + Time int64 `protobuf:"varint,4,opt,name=time" json:"time,omitempty"` + // Data of the event, in the form of key-value pairs, optional. + Data []*KeyValue `protobuf:"bytes,5,rep,name=data" json:"data,omitempty"` +} + +func (m *Event) Reset() { *m = Event{} } +func (m *Event) String() string { return proto.CompactTextString(m) } +func (*Event) ProtoMessage() {} + +func (m *Event) GetData() []*KeyValue { + if m != nil { + return m.Data + } + return nil +} + +// EventFilter defines the condition that the returned events needs to satisfy in ListImages(). +// The condition are combined by 'AND'. +type EventFilter struct { + // If not empty, then only returns the events that have the listed types. + Types []EventType `protobuf:"varint,1,rep,name=types,enum=v1alpha.EventType" json:"types,omitempty"` + // If not empty, then only returns the events whose 'id' is included in the listed ids. + Ids []string `protobuf:"bytes,2,rep,name=ids" json:"ids,omitempty"` + // If not empty, then only returns the events whose 'from' is included in the listed names. + Names []string `protobuf:"bytes,3,rep,name=names" json:"names,omitempty"` + // If set, then only returns the events after this timestamp. + // If the server starts after since_time, then only the events happened after the start of the server will be returned. + // If since_time is a future timestamp, then no events will be returned until that time. 
+ SinceTime int64 `protobuf:"varint,4,opt,name=since_time" json:"since_time,omitempty"` + // If set, then only returns the events before this timestamp. + // If it is a future timestamp, then the event stream will be closed at that moment. + UntilTime int64 `protobuf:"varint,5,opt,name=until_time" json:"until_time,omitempty"` +} + +func (m *EventFilter) Reset() { *m = EventFilter{} } +func (m *EventFilter) String() string { return proto.CompactTextString(m) } +func (*EventFilter) ProtoMessage() {} + +// Request for GetInfo(). +type GetInfoRequest struct { +} + +func (m *GetInfoRequest) Reset() { *m = GetInfoRequest{} } +func (m *GetInfoRequest) String() string { return proto.CompactTextString(m) } +func (*GetInfoRequest) ProtoMessage() {} + +// Response for GetInfo(). +type GetInfoResponse struct { + Info *Info `protobuf:"bytes,1,opt,name=info" json:"info,omitempty"` +} + +func (m *GetInfoResponse) Reset() { *m = GetInfoResponse{} } +func (m *GetInfoResponse) String() string { return proto.CompactTextString(m) } +func (*GetInfoResponse) ProtoMessage() {} + +func (m *GetInfoResponse) GetInfo() *Info { + if m != nil { + return m.Info + } + return nil +} + +// Request for ListPods(). +type ListPodsRequest struct { + Filter *PodFilter `protobuf:"bytes,1,opt,name=filter" json:"filter,omitempty"` +} + +func (m *ListPodsRequest) Reset() { *m = ListPodsRequest{} } +func (m *ListPodsRequest) String() string { return proto.CompactTextString(m) } +func (*ListPodsRequest) ProtoMessage() {} + +func (m *ListPodsRequest) GetFilter() *PodFilter { + if m != nil { + return m.Filter + } + return nil +} + +// Response for ListPods(). 
+type ListPodsResponse struct { + Pods []*Pod `protobuf:"bytes,1,rep,name=pods" json:"pods,omitempty"` +} + +func (m *ListPodsResponse) Reset() { *m = ListPodsResponse{} } +func (m *ListPodsResponse) String() string { return proto.CompactTextString(m) } +func (*ListPodsResponse) ProtoMessage() {} + +func (m *ListPodsResponse) GetPods() []*Pod { + if m != nil { + return m.Pods + } + return nil +} + +// Request for InspectPod(). +type InspectPodRequest struct { + // ID of the pod which we are querying status for, required. + Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` +} + +func (m *InspectPodRequest) Reset() { *m = InspectPodRequest{} } +func (m *InspectPodRequest) String() string { return proto.CompactTextString(m) } +func (*InspectPodRequest) ProtoMessage() {} + +// Response for InspectPod(). +type InspectPodResponse struct { + Pod *Pod `protobuf:"bytes,1,opt,name=pod" json:"pod,omitempty"` +} + +func (m *InspectPodResponse) Reset() { *m = InspectPodResponse{} } +func (m *InspectPodResponse) String() string { return proto.CompactTextString(m) } +func (*InspectPodResponse) ProtoMessage() {} + +func (m *InspectPodResponse) GetPod() *Pod { + if m != nil { + return m.Pod + } + return nil +} + +// Request for ListImages(). +type ListImagesRequest struct { + Filter *ImageFilter `protobuf:"bytes,1,opt,name=filter" json:"filter,omitempty"` +} + +func (m *ListImagesRequest) Reset() { *m = ListImagesRequest{} } +func (m *ListImagesRequest) String() string { return proto.CompactTextString(m) } +func (*ListImagesRequest) ProtoMessage() {} + +func (m *ListImagesRequest) GetFilter() *ImageFilter { + if m != nil { + return m.Filter + } + return nil +} + +// Response for ListImages(). 
+type ListImagesResponse struct { + Images []*Image `protobuf:"bytes,1,rep,name=images" json:"images,omitempty"` +} + +func (m *ListImagesResponse) Reset() { *m = ListImagesResponse{} } +func (m *ListImagesResponse) String() string { return proto.CompactTextString(m) } +func (*ListImagesResponse) ProtoMessage() {} + +func (m *ListImagesResponse) GetImages() []*Image { + if m != nil { + return m.Images + } + return nil +} + +// Request for InspectImage(). +type InspectImageRequest struct { + Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` +} + +func (m *InspectImageRequest) Reset() { *m = InspectImageRequest{} } +func (m *InspectImageRequest) String() string { return proto.CompactTextString(m) } +func (*InspectImageRequest) ProtoMessage() {} + +// Response for InspectImage(). +type InspectImageResponse struct { + Image *Image `protobuf:"bytes,1,opt,name=image" json:"image,omitempty"` +} + +func (m *InspectImageResponse) Reset() { *m = InspectImageResponse{} } +func (m *InspectImageResponse) String() string { return proto.CompactTextString(m) } +func (*InspectImageResponse) ProtoMessage() {} + +func (m *InspectImageResponse) GetImage() *Image { + if m != nil { + return m.Image + } + return nil +} + +// Request for ListenEvents(). +type ListenEventsRequest struct { + Filter *EventFilter `protobuf:"bytes,1,opt,name=filter" json:"filter,omitempty"` +} + +func (m *ListenEventsRequest) Reset() { *m = ListenEventsRequest{} } +func (m *ListenEventsRequest) String() string { return proto.CompactTextString(m) } +func (*ListenEventsRequest) ProtoMessage() {} + +func (m *ListenEventsRequest) GetFilter() *EventFilter { + if m != nil { + return m.Filter + } + return nil +} + +// Response for ListenEvents(). +type ListenEventsResponse struct { + // Aggregate multiple events to reduce round trips, optional as the response can contain no events. 
+ Events []*Event `protobuf:"bytes,1,rep,name=events" json:"events,omitempty"` +} + +func (m *ListenEventsResponse) Reset() { *m = ListenEventsResponse{} } +func (m *ListenEventsResponse) String() string { return proto.CompactTextString(m) } +func (*ListenEventsResponse) ProtoMessage() {} + +func (m *ListenEventsResponse) GetEvents() []*Event { + if m != nil { + return m.Events + } + return nil +} + +// Request for GetLogs(). +type GetLogsRequest struct { + // ID of the pod which we will get logs from, required. + PodId string `protobuf:"bytes,1,opt,name=pod_id" json:"pod_id,omitempty"` + // Name of the app within the pod which we will get logs + // from, optional. If not set, then the logs of all the + // apps within the pod will be returned. + AppName string `protobuf:"bytes,2,opt,name=app_name" json:"app_name,omitempty"` + // Number of most recent lines to return, optional. + Lines int32 `protobuf:"varint,3,opt,name=lines" json:"lines,omitempty"` + // If true, then a response stream will not be closed, + // and new log response will be sent via the stream, default is false. + Follow bool `protobuf:"varint,4,opt,name=follow" json:"follow,omitempty"` + // If set, then only the logs after the timestamp will + // be returned, optional. + SinceTime int64 `protobuf:"varint,5,opt,name=since_time" json:"since_time,omitempty"` + // If set, then only the logs before the timestamp will + // be returned, optional. + UntilTime int64 `protobuf:"varint,6,opt,name=until_time" json:"until_time,omitempty"` +} + +func (m *GetLogsRequest) Reset() { *m = GetLogsRequest{} } +func (m *GetLogsRequest) String() string { return proto.CompactTextString(m) } +func (*GetLogsRequest) ProtoMessage() {} + +// Response for GetLogs(). +type GetLogsResponse struct { + // List of the log lines that returned, optional as the response can contain no logs. 
+ Lines []string `protobuf:"bytes,1,rep,name=lines" json:"lines,omitempty"` +} + +func (m *GetLogsResponse) Reset() { *m = GetLogsResponse{} } +func (m *GetLogsResponse) String() string { return proto.CompactTextString(m) } +func (*GetLogsResponse) ProtoMessage() {} + +func init() { + proto.RegisterEnum("v1alpha.ImageType", ImageType_name, ImageType_value) + proto.RegisterEnum("v1alpha.AppState", AppState_name, AppState_value) + proto.RegisterEnum("v1alpha.PodState", PodState_name, PodState_value) + proto.RegisterEnum("v1alpha.EventType", EventType_name, EventType_value) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// Client API for PublicAPI service + +type PublicAPIClient interface { + // GetInfo gets the rkt's information on the machine. + GetInfo(ctx context.Context, in *GetInfoRequest, opts ...grpc.CallOption) (*GetInfoResponse, error) + // ListPods lists rkt pods on the machine. + ListPods(ctx context.Context, in *ListPodsRequest, opts ...grpc.CallOption) (*ListPodsResponse, error) + // InspectPod gets detailed pod information of the specified pod. + InspectPod(ctx context.Context, in *InspectPodRequest, opts ...grpc.CallOption) (*InspectPodResponse, error) + // ListImages lists the images on the machine. + ListImages(ctx context.Context, in *ListImagesRequest, opts ...grpc.CallOption) (*ListImagesResponse, error) + // InspectImage gets the detailed image information of the specified image. + InspectImage(ctx context.Context, in *InspectImageRequest, opts ...grpc.CallOption) (*InspectImageResponse, error) + // ListenEvents listens for the events, it will return a response stream + // that will contain event objects. + ListenEvents(ctx context.Context, in *ListenEventsRequest, opts ...grpc.CallOption) (PublicAPI_ListenEventsClient, error) + // GetLogs gets the logs for a pod, if the app is also specified, then only the logs + // of the app will be returned. 
+ // + // If 'follow' in the 'GetLogsRequest' is set to 'true', then the response stream + // will not be closed after the first response, the future logs will be sent via + // the stream. + GetLogs(ctx context.Context, in *GetLogsRequest, opts ...grpc.CallOption) (PublicAPI_GetLogsClient, error) +} + +type publicAPIClient struct { + cc *grpc.ClientConn +} + +func NewPublicAPIClient(cc *grpc.ClientConn) PublicAPIClient { + return &publicAPIClient{cc} +} + +func (c *publicAPIClient) GetInfo(ctx context.Context, in *GetInfoRequest, opts ...grpc.CallOption) (*GetInfoResponse, error) { + out := new(GetInfoResponse) + err := grpc.Invoke(ctx, "/v1alpha.PublicAPI/GetInfo", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *publicAPIClient) ListPods(ctx context.Context, in *ListPodsRequest, opts ...grpc.CallOption) (*ListPodsResponse, error) { + out := new(ListPodsResponse) + err := grpc.Invoke(ctx, "/v1alpha.PublicAPI/ListPods", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *publicAPIClient) InspectPod(ctx context.Context, in *InspectPodRequest, opts ...grpc.CallOption) (*InspectPodResponse, error) { + out := new(InspectPodResponse) + err := grpc.Invoke(ctx, "/v1alpha.PublicAPI/InspectPod", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *publicAPIClient) ListImages(ctx context.Context, in *ListImagesRequest, opts ...grpc.CallOption) (*ListImagesResponse, error) { + out := new(ListImagesResponse) + err := grpc.Invoke(ctx, "/v1alpha.PublicAPI/ListImages", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *publicAPIClient) InspectImage(ctx context.Context, in *InspectImageRequest, opts ...grpc.CallOption) (*InspectImageResponse, error) { + out := new(InspectImageResponse) + err := grpc.Invoke(ctx, "/v1alpha.PublicAPI/InspectImage", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *publicAPIClient) ListenEvents(ctx context.Context, in *ListenEventsRequest, opts ...grpc.CallOption) (PublicAPI_ListenEventsClient, error) { + stream, err := grpc.NewClientStream(ctx, &_PublicAPI_serviceDesc.Streams[0], c.cc, "/v1alpha.PublicAPI/ListenEvents", opts...) + if err != nil { + return nil, err + } + x := &publicAPIListenEventsClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type PublicAPI_ListenEventsClient interface { + Recv() (*ListenEventsResponse, error) + grpc.ClientStream +} + +type publicAPIListenEventsClient struct { + grpc.ClientStream +} + +func (x *publicAPIListenEventsClient) Recv() (*ListenEventsResponse, error) { + m := new(ListenEventsResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *publicAPIClient) GetLogs(ctx context.Context, in *GetLogsRequest, opts ...grpc.CallOption) (PublicAPI_GetLogsClient, error) { + stream, err := grpc.NewClientStream(ctx, &_PublicAPI_serviceDesc.Streams[1], c.cc, "/v1alpha.PublicAPI/GetLogs", opts...) + if err != nil { + return nil, err + } + x := &publicAPIGetLogsClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type PublicAPI_GetLogsClient interface { + Recv() (*GetLogsResponse, error) + grpc.ClientStream +} + +type publicAPIGetLogsClient struct { + grpc.ClientStream +} + +func (x *publicAPIGetLogsClient) Recv() (*GetLogsResponse, error) { + m := new(GetLogsResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// Server API for PublicAPI service + +type PublicAPIServer interface { + // GetInfo gets the rkt's information on the machine. 
+ GetInfo(context.Context, *GetInfoRequest) (*GetInfoResponse, error) + // ListPods lists rkt pods on the machine. + ListPods(context.Context, *ListPodsRequest) (*ListPodsResponse, error) + // InspectPod gets detailed pod information of the specified pod. + InspectPod(context.Context, *InspectPodRequest) (*InspectPodResponse, error) + // ListImages lists the images on the machine. + ListImages(context.Context, *ListImagesRequest) (*ListImagesResponse, error) + // InspectImage gets the detailed image information of the specified image. + InspectImage(context.Context, *InspectImageRequest) (*InspectImageResponse, error) + // ListenEvents listens for the events, it will return a response stream + // that will contain event objects. + ListenEvents(*ListenEventsRequest, PublicAPI_ListenEventsServer) error + // GetLogs gets the logs for a pod, if the app is also specified, then only the logs + // of the app will be returned. + // + // If 'follow' in the 'GetLogsRequest' is set to 'true', then the response stream + // will not be closed after the first response, the future logs will be sent via + // the stream. 
+ GetLogs(*GetLogsRequest, PublicAPI_GetLogsServer) error +} + +func RegisterPublicAPIServer(s *grpc.Server, srv PublicAPIServer) { + s.RegisterService(&_PublicAPI_serviceDesc, srv) +} + +func _PublicAPI_GetInfo_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) { + in := new(GetInfoRequest) + if err := codec.Unmarshal(buf, in); err != nil { + return nil, err + } + out, err := srv.(PublicAPIServer).GetInfo(ctx, in) + if err != nil { + return nil, err + } + return out, nil +} + +func _PublicAPI_ListPods_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) { + in := new(ListPodsRequest) + if err := codec.Unmarshal(buf, in); err != nil { + return nil, err + } + out, err := srv.(PublicAPIServer).ListPods(ctx, in) + if err != nil { + return nil, err + } + return out, nil +} + +func _PublicAPI_InspectPod_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) { + in := new(InspectPodRequest) + if err := codec.Unmarshal(buf, in); err != nil { + return nil, err + } + out, err := srv.(PublicAPIServer).InspectPod(ctx, in) + if err != nil { + return nil, err + } + return out, nil +} + +func _PublicAPI_ListImages_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) { + in := new(ListImagesRequest) + if err := codec.Unmarshal(buf, in); err != nil { + return nil, err + } + out, err := srv.(PublicAPIServer).ListImages(ctx, in) + if err != nil { + return nil, err + } + return out, nil +} + +func _PublicAPI_InspectImage_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) { + in := new(InspectImageRequest) + if err := codec.Unmarshal(buf, in); err != nil { + return nil, err + } + out, err := srv.(PublicAPIServer).InspectImage(ctx, in) + if err != nil { + return nil, err + } + return out, nil +} + +func _PublicAPI_ListenEvents_Handler(srv interface{}, stream 
grpc.ServerStream) error { + m := new(ListenEventsRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(PublicAPIServer).ListenEvents(m, &publicAPIListenEventsServer{stream}) +} + +type PublicAPI_ListenEventsServer interface { + Send(*ListenEventsResponse) error + grpc.ServerStream +} + +type publicAPIListenEventsServer struct { + grpc.ServerStream +} + +func (x *publicAPIListenEventsServer) Send(m *ListenEventsResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _PublicAPI_GetLogs_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(GetLogsRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(PublicAPIServer).GetLogs(m, &publicAPIGetLogsServer{stream}) +} + +type PublicAPI_GetLogsServer interface { + Send(*GetLogsResponse) error + grpc.ServerStream +} + +type publicAPIGetLogsServer struct { + grpc.ServerStream +} + +func (x *publicAPIGetLogsServer) Send(m *GetLogsResponse) error { + return x.ServerStream.SendMsg(m) +} + +var _PublicAPI_serviceDesc = grpc.ServiceDesc{ + ServiceName: "v1alpha.PublicAPI", + HandlerType: (*PublicAPIServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "GetInfo", + Handler: _PublicAPI_GetInfo_Handler, + }, + { + MethodName: "ListPods", + Handler: _PublicAPI_ListPods_Handler, + }, + { + MethodName: "InspectPod", + Handler: _PublicAPI_InspectPod_Handler, + }, + { + MethodName: "ListImages", + Handler: _PublicAPI_ListImages_Handler, + }, + { + MethodName: "InspectImage", + Handler: _PublicAPI_InspectImage_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "ListenEvents", + Handler: _PublicAPI_ListenEvents_Handler, + ServerStreams: true, + }, + { + StreamName: "GetLogs", + Handler: _PublicAPI_GetLogs_Handler, + ServerStreams: true, + }, + }, +} diff --git a/Godeps/_workspace/src/github.com/coreos/rkt/api/v1alpha/api.proto b/Godeps/_workspace/src/github.com/coreos/rkt/api/v1alpha/api.proto new file mode 100644 index 
00000000000..d3dc26cf599 --- /dev/null +++ b/Godeps/_workspace/src/github.com/coreos/rkt/api/v1alpha/api.proto @@ -0,0 +1,416 @@ +// Copyright 2015 The rkt Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// To compile, run 'protoc -I api/v1alpha api/v1alpha/api.proto --go_out=plugins=grpc:api/v1alpha' in rkt root directory. +// The protoc version must be 3.0.0. + +// *************************************************** // +// ************ WARNING - HERE BE DRAGONS ************ // +// // +// The API defined here is proposed, experimental, // +// and (for now) subject to change at any time. // +// // +// Do not use it. // +// // +// If you think you want to use it, or for any other // +// queries, contact // +// or file an issue on github.com/coreos/rkt // +// // +// *************************************************** // +// ****************** END WARNING ******************** // + +syntax = "proto3"; + +package v1alpha; + +// ImageType defines the supported image type. +enum ImageType { + IMAGE_TYPE_UNDEFINED = 0; + IMAGE_TYPE_APPC = 1; + IMAGE_TYPE_DOCKER = 2; + IMAGE_TYPE_OCI = 3; +} + +// ImageFormat defines the format of the image. +message ImageFormat { + // Type of the image, required. + ImageType type = 1; + + // Version of the image format, required. + string version = 2; +} + +// Image describes the image's information. +message Image { + // Base format of the image, required. 
This indicates the original format + // for the image as nowadays all the image formats will be transformed to + // ACI. + ImageFormat base_format = 1; + + // ID of the image, a string that can be used to uniquely identify the image, + // e.g. sha512 hash of the ACIs, required. + string id = 2; + + // Name of the image in the image manifest, e.g. 'coreos.com/etcd', optional. + string name = 3; + + // Version of the image, e.g. 'latest', '2.0.10', optional. + string version = 4; + + // Timestamp of when the image is imported, it is the seconds since epoch, optional. + int64 import_timestamp = 5; + + // JSON-encoded byte array that represents the image manifest, optional. + bytes manifest = 6; +} + +// Network describes the network information of a pod. +message Network { + // Name of the network that a pod belongs to, required. + string name = 1; + + // Pod's IPv4 address within the network, optional if IPv6 address is given. + string ipv4 = 2; + + // Pod's IPv6 address within the network, optional if IPv4 address is given. + string ipv6 = 3; +} + +// AppState defines the possible states of the app. +enum AppState { + APP_STATE_UNDEFINED = 0; + APP_STATE_RUNNING = 1; + APP_STATE_EXITED = 2; +} + +// App describes the information of an app that's running in a pod. +message App { + // Name of the app, required. + string name = 1; + + // Image used by the app, required. However, this may only contain the image id + // if it is returned by ListPods(). + Image image = 2; + + // State of the app. optional, non-empty only if it's returned by InspectPod(). + AppState state = 3; + + // Exit code of the app. optional, only valid if it's returned by InspectPod() and + // the app has already exited. + sint32 exit_code = 4; +} + +// PodState defines the possible states of the pod. +// See https://github.com/coreos/rkt/blob/master/Documentation/devel/pod-lifecycle.md for a detailed +// explanation of each state. 
+enum PodState { + POD_STATE_UNDEFINED = 0; + + // States before the pod is running. + POD_STATE_EMBRYO = 1; // Pod is created, ready to entering 'preparing' state. + POD_STATE_PREPARING = 2; // Pod is being prepared. On success it will become 'prepared', otherwise it will become 'aborted prepared'. + POD_STATE_PREPARED = 3; // Pod has been successfully prepared, ready to enter 'running' state. it can also enter 'deleting' if it's garbage collected before running. + + // State that indicates the pod is running. + POD_STATE_RUNNING = 4; // Pod is running, when it exits, it will become 'exited'. + + // States that indicates the pod is exited, and will never run. + POD_STATE_ABORTED_PREPARE = 5; // Pod failed to prepare, it will only be garbage collected and will never run again. + POD_STATE_EXITED = 6; // Pod has exited, it now can be garbage collected. + POD_STATE_DELETING = 7; // Pod is being garbage collected, after that it will enter 'garbage' state. + POD_STATE_GARBAGE = 8; // Pod is marked as garbage collected, it no longer exists on the machine. +} + +// Pod describes a pod's information. +message Pod { + // ID of the pod, in the form of a UUID, required. + string id = 1; + + // PID of the pod, optional, only valid if it's returned by InspectPod(). A negative value means the pod has exited. + sint32 pid = 2; + + // State of the pod, required. + PodState state = 3; + + // List of apps in the pod, required. + repeated App apps = 4; + + // Network information of the pod, optional, non-empty if the pod is running in private net. + // Note that a pod can be in multiple networks. + repeated Network networks = 5; + + // JSON-encoded byte array that represents the pod manifest of the pod, required. + bytes manifest = 6; +} + +message KeyValue { + // Key part of the key-value pair. + string Key = 1; + // Value part of the key-value pair. + string value = 2; +} + +// PodFilter defines the condition that the returned pods need to satisfy in ListPods(). 
+// The conditions are combined by 'AND'. +message PodFilter { + // If not empty, the pods that have any of the ids will be returned. + repeated string ids = 1; + + // If not empty, the pods that have any of the states will be returned. + repeated PodState states = 2; + + // If not empty, the pods that have any of the apps will be returned. + repeated string app_names = 3; + + // If not empty, the pods that have any of the images(in the apps) will be returned + repeated string image_ids = 4; + + // If not empty, the pods that are in any of the networks will be returned. + repeated string network_names = 5; + + // If not empty, the pods that have any of the annotations will be returned. + repeated KeyValue annotations = 6; +} + +// ImageFilter defines the condition that the returned images need to satisfy in ListImages(). +// The conditions are combined by 'AND'. +message ImageFilter { + // If not empty, the images that have any of the ids will be returned. + repeated string ids = 1; + + // if not empty, the images that have any of the prefixes in the name will be returned. + repeated string prefixes = 2; + + // If not empty, the images that have any of the base names will be returned. + // For example, both 'coreos.com/etcd' and 'k8s.io/etcd' will be returned if 'etcd' is included, + // however 'k8s.io/etcd-backup' will not be returned. + repeated string base_names = 3; + + // If not empty, the images that have any of the keywords in the name will be returned. + // For example, both 'kubernetes-etcd', 'etcd:latest' will be returned if 'etcd' is included, + repeated string keywords = 4; + + // If not empty, the images that have any of the labels will be returned. + repeated KeyValue labels = 5; + + // If set, the images that are imported after this timestamp will be returned. + int64 imported_after = 6; + + // If set, the images that are imported before this timestamp will be returned. 
+ int64 imported_before = 7; + + // If not empty, the images that have any of the annotations will be returned. + repeated KeyValue annotations = 8; +} + +// Info describes the information of rkt on the machine. +message Info { + // Version of rkt, required, in the form of Semantic Versioning 2.0.0 (http://semver.org/). + string rkt_version = 1; + + // Version of appc, required, in the form of Semantic Versioning 2.0.0 (http://semver.org/). + string appc_version = 2; + + // Latest version of the api that's supported by the service, required, in the form of Semantic Versioning 2.0.0 (http://semver.org/). + string api_version = 3; +} + +// EventType defines the type of the events that will be received via ListenEvents(). +enum EventType { + EVENT_TYPE_UNDEFINED = 0; + + // Pod events. + EVENT_TYPE_POD_PREPARED = 1; + EVENT_TYPE_POD_PREPARE_ABORTED = 2; + EVENT_TYPE_POD_STARTED = 3; + EVENT_TYPE_POD_EXITED = 4; + EVENT_TYPE_POD_GARBAGE_COLLECTED = 5; + + // App events. + EVENT_TYPE_APP_STARTED = 6; + EVENT_TYPE_APP_EXITED = 7; // (XXX)yifan: Maybe also return exit code in the event object? + + // Image events. + EVENT_TYPE_IMAGE_IMPORTED = 8; + EVENT_TYPE_IMAGE_REMOVED = 9; +} + +// Event describes the events that will be received via ListenEvents(). +message Event { + // Type of the event, required. + EventType type = 1; + + // ID of the subject that causes the event, required. + // If the event is a pod or app event, the id is the pod's uuid. + // If the event is an image event, the id is the image's id. + string id = 2; + + // Name of the subject that causes the event, required. + // If the event is a pod event, the name is the pod's name. + // If the event is an app event, the name is the app's name. + // If the event is an image event, the name is the image's name. + string from = 3; + + // Timestamp of when the event happens, it is the seconds since epoch, required. + int64 time = 4; + + // Data of the event, in the form of key-value pairs, optional. 
+ repeated KeyValue data = 5; +} + +// EventFilter defines the condition that the returned events needs to satisfy in ListImages(). +// The condition are combined by 'AND'. +message EventFilter { + // If not empty, then only returns the events that have the listed types. + repeated EventType types = 1; + + // If not empty, then only returns the events whose 'id' is included in the listed ids. + repeated string ids = 2; + + // If not empty, then only returns the events whose 'from' is included in the listed names. + repeated string names = 3; + + // If set, then only returns the events after this timestamp. + // If the server starts after since_time, then only the events happened after the start of the server will be returned. + // If since_time is a future timestamp, then no events will be returned until that time. + int64 since_time = 4; + + // If set, then only returns the events before this timestamp. + // If it is a future timestamp, then the event stream will be closed at that moment. + int64 until_time = 5; +} + + +// Request for GetInfo(). +message GetInfoRequest {} + +// Response for GetInfo(). +message GetInfoResponse { + Info info = 1; // Required. +} + +// Request for ListPods(). +message ListPodsRequest { + PodFilter filter = 1; // Optional. +} + +// Response for ListPods(). +message ListPodsResponse { + repeated Pod pods = 1; // Required. +} + +// Request for InspectPod(). +message InspectPodRequest { + // ID of the pod which we are querying status for, required. + string id = 1; +} + +// Response for InspectPod(). +message InspectPodResponse { + Pod pod = 1; // Required. +} + +// Request for ListImages(). +message ListImagesRequest { + ImageFilter filter = 1; // Optional. +} + +// Response for ListImages(). +message ListImagesResponse { + repeated Image images = 1; // Required. +} + +// Request for InspectImage(). +message InspectImageRequest { + string id = 1; // Required. +} + +// Response for InspectImage(). 
+message InspectImageResponse { + Image image = 1; // Required. +} + +// Request for ListenEvents(). +message ListenEventsRequest { + EventFilter filter = 1; // Optional. +} + +// Response for ListenEvents(). +message ListenEventsResponse { + // Aggregate multiple events to reduce round trips, optional as the response can contain no events. + repeated Event events = 1; +} + +// Request for GetLogs(). +message GetLogsRequest { + // ID of the pod which we will get logs from, required. + string pod_id = 1; + + // Name of the app within the pod which we will get logs + // from, optional. If not set, then the logs of all the + // apps within the pod will be returned. + string app_name = 2; + + // Number of most recent lines to return, optional. + int32 lines = 3; + + // If true, then a response stream will not be closed, + // and new log response will be sent via the stream, default is false. + bool follow = 4; + + // If set, then only the logs after the timestamp will + // be returned, optional. + int64 since_time = 5; + + // If set, then only the logs before the timestamp will + // be returned, optional. + int64 until_time = 6; +} + +// Response for GetLogs(). +message GetLogsResponse { + // List of the log lines that returned, optional as the response can contain no logs. + repeated string lines = 1; +} + +// PublicAPI defines the read-only APIs that will be supported. +// These will be handled over TCP sockets. +service PublicAPI { + // GetInfo gets the rkt's information on the machine. + rpc GetInfo (GetInfoRequest) returns (GetInfoResponse) {} + + // ListPods lists rkt pods on the machine. + rpc ListPods (ListPodsRequest) returns (ListPodsResponse) {} + + // InspectPod gets detailed pod information of the specified pod. + rpc InspectPod (InspectPodRequest) returns (InspectPodResponse) {} + + // ListImages lists the images on the machine. 
+ rpc ListImages (ListImagesRequest) returns (ListImagesResponse) {} + + // InspectImage gets the detailed image information of the specified image. + rpc InspectImage (InspectImageRequest) returns (InspectImageResponse) {} + + // ListenEvents listens for the events, it will return a response stream + // that will contain event objects. + rpc ListenEvents (ListenEventsRequest) returns (stream ListenEventsResponse) {} + + // GetLogs gets the logs for a pod, if the app is also specified, then only the logs + // of the app will be returned. + // + // If 'follow' in the 'GetLogsRequest' is set to 'true', then the response stream + // will not be closed after the first response, the future logs will be sent via + // the stream. + rpc GetLogs(GetLogsRequest) returns (stream GetLogsResponse) {} +} diff --git a/Godeps/_workspace/src/golang.org/x/net/internal/timeseries/timeseries.go b/Godeps/_workspace/src/golang.org/x/net/internal/timeseries/timeseries.go new file mode 100644 index 00000000000..3f90b7300d4 --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/net/internal/timeseries/timeseries.go @@ -0,0 +1,525 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package timeseries implements a time series structure for stats collection. +package timeseries + +import ( + "fmt" + "log" + "time" +) + +const ( + timeSeriesNumBuckets = 64 + minuteHourSeriesNumBuckets = 60 +) + +var timeSeriesResolutions = []time.Duration{ + 1 * time.Second, + 10 * time.Second, + 1 * time.Minute, + 10 * time.Minute, + 1 * time.Hour, + 6 * time.Hour, + 24 * time.Hour, // 1 day + 7 * 24 * time.Hour, // 1 week + 4 * 7 * 24 * time.Hour, // 4 weeks + 16 * 7 * 24 * time.Hour, // 16 weeks +} + +var minuteHourSeriesResolutions = []time.Duration{ + 1 * time.Second, + 1 * time.Minute, +} + +// An Observable is a kind of data that can be aggregated in a time series. 
+type Observable interface { + Multiply(ratio float64) // Multiplies the data in self by a given ratio + Add(other Observable) // Adds the data from a different observation to self + Clear() // Clears the observation so it can be reused. + CopyFrom(other Observable) // Copies the contents of a given observation to self +} + +// Float attaches the methods of Observable to a float64. +type Float float64 + +// NewFloat returns a Float. +func NewFloat() Observable { + f := Float(0) + return &f +} + +// String returns the float as a string. +func (f *Float) String() string { return fmt.Sprintf("%g", f.Value()) } + +// Value returns the float's value. +func (f *Float) Value() float64 { return float64(*f) } + +func (f *Float) Multiply(ratio float64) { *f *= Float(ratio) } + +func (f *Float) Add(other Observable) { + o := other.(*Float) + *f += *o +} + +func (f *Float) Clear() { *f = 0 } + +func (f *Float) CopyFrom(other Observable) { + o := other.(*Float) + *f = *o +} + +// A Clock tells the current time. +type Clock interface { + Time() time.Time +} + +type defaultClock int + +var defaultClockInstance defaultClock + +func (defaultClock) Time() time.Time { return time.Now() } + +// Information kept per level. Each level consists of a circular list of +// observations. The start of the level may be derived from end and the +// len(buckets) * sizeInMillis. 
+type tsLevel struct { + oldest int // index to oldest bucketed Observable + newest int // index to newest bucketed Observable + end time.Time // end timestamp for this level + size time.Duration // duration of the bucketed Observable + buckets []Observable // collections of observations + provider func() Observable // used for creating new Observable +} + +func (l *tsLevel) Clear() { + l.oldest = 0 + l.newest = len(l.buckets) - 1 + l.end = time.Time{} + for i := range l.buckets { + if l.buckets[i] != nil { + l.buckets[i].Clear() + l.buckets[i] = nil + } + } +} + +func (l *tsLevel) InitLevel(size time.Duration, numBuckets int, f func() Observable) { + l.size = size + l.provider = f + l.buckets = make([]Observable, numBuckets) +} + +// Keeps a sequence of levels. Each level is responsible for storing data at +// a given resolution. For example, the first level stores data at a one +// minute resolution while the second level stores data at a one hour +// resolution. + +// Each level is represented by a sequence of buckets. Each bucket spans an +// interval equal to the resolution of the level. New observations are added +// to the last bucket. +type timeSeries struct { + provider func() Observable // make more Observable + numBuckets int // number of buckets in each level + levels []*tsLevel // levels of bucketed Observable + lastAdd time.Time // time of last Observable tracked + total Observable // convenient aggregation of all Observable + clock Clock // Clock for getting current time + pending Observable // observations not yet bucketed + pendingTime time.Time // what time are we keeping in pending + dirty bool // if there are pending observations +} + +// init initializes a level according to the supplied criteria. 
func (ts *timeSeries) init(resolutions []time.Duration, f func() Observable, numBuckets int, clock Clock) {
	ts.provider = f
	ts.numBuckets = numBuckets
	ts.clock = clock
	ts.levels = make([]*tsLevel, len(resolutions))

	for i := range resolutions {
		// Resolutions must be strictly increasing. On violation, only a
		// log message is emitted and the remaining level slots are left
		// nil.
		if i > 0 && resolutions[i-1] >= resolutions[i] {
			log.Print("timeseries: resolutions must be monotonically increasing")
			break
		}
		newLevel := new(tsLevel)
		newLevel.InitLevel(resolutions[i], ts.numBuckets, ts.provider)
		ts.levels[i] = newLevel
	}

	ts.Clear()
}

// Clear removes all observations from the time series.
func (ts *timeSeries) Clear() {
	ts.lastAdd = time.Time{}
	ts.total = ts.resetObservation(ts.total)
	ts.pending = ts.resetObservation(ts.pending)
	ts.pendingTime = time.Time{}
	ts.dirty = false

	for i := range ts.levels {
		ts.levels[i].Clear()
	}
}

// Add records an observation at the current time.
func (ts *timeSeries) Add(observation Observable) {
	ts.AddWithTime(observation, ts.clock.Time())
}

// AddWithTime records an observation at the specified time.
func (ts *timeSeries) AddWithTime(observation Observable, t time.Time) {

	smallBucketDuration := ts.levels[0].size

	// Track the most recent timestamp seen; extract() clips partially
	// filled buckets against it.
	if t.After(ts.lastAdd) {
		ts.lastAdd = t
	}

	if t.After(ts.pendingTime) {
		// Newer than anything pending: rotate the buckets forward, flush
		// the previous pending observation, and start a new pending one.
		ts.advance(t)
		ts.mergePendingUpdates()
		ts.pendingTime = ts.levels[0].end
		ts.pending.CopyFrom(observation)
		ts.dirty = true
	} else if t.After(ts.pendingTime.Add(-1 * smallBucketDuration)) {
		// The observation is close enough to go into the pending bucket.
		// This compensates for clock skewing and small scheduling delays
		// by letting the update stay in the fast path.
		ts.pending.Add(observation)
		ts.dirty = true
	} else {
		// Strictly in the past: merge straight into the historical buckets.
		ts.mergeValue(observation, t)
	}
}

// mergeValue inserts the observation at the specified time in the past into all levels.
func (ts *timeSeries) mergeValue(observation Observable, t time.Time) {
	for _, level := range ts.levels {
		// index is the offset from the oldest bucket (0) to the bucket
		// containing t; observations older than this level's retained
		// window fall outside [0, numBuckets) and are skipped here.
		index := (ts.numBuckets - 1) - int(level.end.Sub(t)/level.size)
		if 0 <= index && index < ts.numBuckets {
			bucketNumber := (level.oldest + index) % ts.numBuckets
			// Buckets are allocated lazily via the provider.
			if level.buckets[bucketNumber] == nil {
				level.buckets[bucketNumber] = level.provider()
			}
			level.buckets[bucketNumber].Add(observation)
		}
	}
	// The running total always receives the observation, even if it was
	// too old for every level.
	ts.total.Add(observation)
}

// mergePendingUpdates applies the pending updates into all levels.
func (ts *timeSeries) mergePendingUpdates() {
	if ts.dirty {
		ts.mergeValue(ts.pending, ts.pendingTime)
		ts.pending = ts.resetObservation(ts.pending)
		ts.dirty = false
	}
}

// advance cycles the buckets at each level until the latest bucket in
// each level can hold the time specified.
func (ts *timeSeries) advance(t time.Time) {
	if !t.After(ts.levels[0].end) {
		return
	}
	for i := 0; i < len(ts.levels); i++ {
		level := ts.levels[i]
		if !level.end.Before(t) {
			break
		}

		// If the time is sufficiently far, just clear the level and advance
		// directly.
		if !t.Before(level.end.Add(level.size * time.Duration(ts.numBuckets))) {
			for _, b := range level.buckets {
				ts.resetObservation(b)
			}
			// Snap level.end to a multiple of the level's bucket size.
			level.end = time.Unix(0, (t.UnixNano()/level.size.Nanoseconds())*level.size.Nanoseconds())
		}

		// Rotate the ring one bucket at a time until level.end reaches t.
		for t.After(level.end) {
			level.end = level.end.Add(level.size)
			level.newest = level.oldest
			level.oldest = (level.oldest + 1) % ts.numBuckets
			ts.resetObservation(level.buckets[level.newest])
		}

		t = level.end
	}
}

// Latest returns the sum of the num latest buckets from the level.
+func (ts *timeSeries) Latest(level, num int) Observable { + now := ts.clock.Time() + if ts.levels[0].end.Before(now) { + ts.advance(now) + } + + ts.mergePendingUpdates() + + result := ts.provider() + l := ts.levels[level] + index := l.newest + + for i := 0; i < num; i++ { + if l.buckets[index] != nil { + result.Add(l.buckets[index]) + } + if index == 0 { + index = ts.numBuckets + } + index-- + } + + return result +} + +// LatestBuckets returns a copy of the num latest buckets from level. +func (ts *timeSeries) LatestBuckets(level, num int) []Observable { + if level < 0 || level > len(ts.levels) { + log.Print("timeseries: bad level argument: ", level) + return nil + } + if num < 0 || num >= ts.numBuckets { + log.Print("timeseries: bad num argument: ", num) + return nil + } + + results := make([]Observable, num) + now := ts.clock.Time() + if ts.levels[0].end.Before(now) { + ts.advance(now) + } + + ts.mergePendingUpdates() + + l := ts.levels[level] + index := l.newest + + for i := 0; i < num; i++ { + result := ts.provider() + results[i] = result + if l.buckets[index] != nil { + result.CopyFrom(l.buckets[index]) + } + + if index == 0 { + index = ts.numBuckets + } + index -= 1 + } + return results +} + +// ScaleBy updates observations by scaling by factor. +func (ts *timeSeries) ScaleBy(factor float64) { + for _, l := range ts.levels { + for i := 0; i < ts.numBuckets; i++ { + l.buckets[i].Multiply(factor) + } + } + + ts.total.Multiply(factor) + ts.pending.Multiply(factor) +} + +// Range returns the sum of observations added over the specified time range. +// If start or finish times don't fall on bucket boundaries of the same +// level, then return values are approximate answers. +func (ts *timeSeries) Range(start, finish time.Time) Observable { + return ts.ComputeRange(start, finish, 1)[0] +} + +// Recent returns the sum of observations from the last delta. 
func (ts *timeSeries) Recent(delta time.Duration) Observable {
	now := ts.clock.Time()
	return ts.Range(now.Add(-delta), now)
}

// Total returns the total of all observations.
func (ts *timeSeries) Total() Observable {
	// Flush pending data first so the running total is up to date.
	ts.mergePendingUpdates()
	return ts.total
}

// ComputeRange computes a specified number of values into a slice using
// the observations recorded over the specified time period. The return
// values are approximate if the start or finish times don't fall on the
// bucket boundaries at the same level or if the number of buckets spanning
// the range is not an integral multiple of num.
func (ts *timeSeries) ComputeRange(start, finish time.Time, num int) []Observable {
	if start.After(finish) {
		log.Printf("timeseries: start > finish, %v>%v", start, finish)
		return nil
	}

	if num < 0 {
		log.Printf("timeseries: num < 0, %v", num)
		return nil
	}

	results := make([]Observable, num)

	// Use the finest-resolution level whose retained window still reaches
	// back to start.
	for _, l := range ts.levels {
		if !start.Before(l.end.Add(-l.size * time.Duration(ts.numBuckets))) {
			ts.extract(l, start, finish, num, results)
			return results
		}
	}

	// Failed to find a level that covers the desired range. So just
	// extract from the last level, even if it doesn't cover the entire
	// desired range.
	ts.extract(ts.levels[len(ts.levels)-1], start, finish, num, results)

	return results
}

// RecentList returns the specified number of values in slice over the most
// recent time period of the specified range.
func (ts *timeSeries) RecentList(delta time.Duration, num int) []Observable {
	if delta < 0 {
		return nil
	}
	now := ts.clock.Time()
	return ts.ComputeRange(now.Add(-delta), now, num)
}

// extract returns a slice of specified number of observations from a given
// level over a given range.
func (ts *timeSeries) extract(l *tsLevel, start, finish time.Time, num int, results []Observable) {
	// Flush pending data so it is visible to the scan below.
	ts.mergePendingUpdates()

	srcInterval := l.size
	dstInterval := finish.Sub(start) / time.Duration(num)
	dstStart := start
	srcStart := l.end.Add(-srcInterval * time.Duration(ts.numBuckets))

	srcIndex := 0

	// Where should scanning start?
	if dstStart.After(srcStart) {
		advance := dstStart.Sub(srcStart) / srcInterval
		srcIndex += int(advance)
		srcStart = srcStart.Add(advance * srcInterval)
	}

	// The i'th value is computed as shown below.
	// interval = (finish/start)/num
	// i'th value = sum of observation in range
	//   [ start + i * interval,
	//     start + (i + 1) * interval )
	for i := 0; i < num; i++ {
		results[i] = ts.resetObservation(results[i])
		dstEnd := dstStart.Add(dstInterval)
		for srcIndex < ts.numBuckets && srcStart.Before(dstEnd) {
			srcEnd := srcStart.Add(srcInterval)
			// Clip the source bucket at the last time data was actually
			// added, so a partially filled current bucket is weighted only
			// by the time it actually covers.
			if srcEnd.After(ts.lastAdd) {
				srcEnd = ts.lastAdd
			}

			if !srcEnd.Before(dstStart) {
				srcValue := l.buckets[(srcIndex+l.oldest)%ts.numBuckets]
				if !srcStart.Before(dstStart) && !srcEnd.After(dstEnd) {
					// dst completely contains src.
					if srcValue != nil {
						results[i].Add(srcValue)
					}
				} else {
					// dst partially overlaps src.
					// Apportion the bucket's value linearly by the
					// fraction of the bucket that the overlap covers.
					overlapStart := maxTime(srcStart, dstStart)
					overlapEnd := minTime(srcEnd, dstEnd)
					base := srcEnd.Sub(srcStart)
					fraction := overlapEnd.Sub(overlapStart).Seconds() / base.Seconds()

					used := ts.provider()
					if srcValue != nil {
						used.CopyFrom(srcValue)
					}
					used.Multiply(fraction)
					results[i].Add(used)
				}

				if srcEnd.After(dstEnd) {
					break
				}
			}
			srcIndex++
			srcStart = srcStart.Add(srcInterval)
		}
		dstStart = dstStart.Add(dstInterval)
	}
}

// resetObservation clears the content so the struct may be reused.
+func (ts *timeSeries) resetObservation(observation Observable) Observable { + if observation == nil { + observation = ts.provider() + } else { + observation.Clear() + } + return observation +} + +// TimeSeries tracks data at granularities from 1 second to 16 weeks. +type TimeSeries struct { + timeSeries +} + +// NewTimeSeries creates a new TimeSeries using the function provided for creating new Observable. +func NewTimeSeries(f func() Observable) *TimeSeries { + return NewTimeSeriesWithClock(f, defaultClockInstance) +} + +// NewTimeSeriesWithClock creates a new TimeSeries using the function provided for creating new Observable and the clock for +// assigning timestamps. +func NewTimeSeriesWithClock(f func() Observable, clock Clock) *TimeSeries { + ts := new(TimeSeries) + ts.timeSeries.init(timeSeriesResolutions, f, timeSeriesNumBuckets, clock) + return ts +} + +// MinuteHourSeries tracks data at granularities of 1 minute and 1 hour. +type MinuteHourSeries struct { + timeSeries +} + +// NewMinuteHourSeries creates a new MinuteHourSeries using the function provided for creating new Observable. +func NewMinuteHourSeries(f func() Observable) *MinuteHourSeries { + return NewMinuteHourSeriesWithClock(f, defaultClockInstance) +} + +// NewMinuteHourSeriesWithClock creates a new MinuteHourSeries using the function provided for creating new Observable and the clock for +// assigning timestamps. 
func NewMinuteHourSeriesWithClock(f func() Observable, clock Clock) *MinuteHourSeries {
	ts := new(MinuteHourSeries)
	ts.timeSeries.init(minuteHourSeriesResolutions, f,
		minuteHourSeriesNumBuckets, clock)
	return ts
}

// Minute returns the aggregate of the 60 most recent buckets of the
// finest level (level 0).
func (ts *MinuteHourSeries) Minute() Observable {
	return ts.timeSeries.Latest(0, 60)
}

// Hour returns the aggregate of the 60 most recent buckets of the
// coarser level (level 1).
func (ts *MinuteHourSeries) Hour() Observable {
	return ts.timeSeries.Latest(1, 60)
}

// minTime returns the earlier of a and b.
func minTime(a, b time.Time) time.Time {
	if a.Before(b) {
		return a
	}
	return b
}

// maxTime returns the later of a and b.
func maxTime(a, b time.Time) time.Time {
	if a.After(b) {
		return a
	}
	return b
}
diff --git a/Godeps/_workspace/src/golang.org/x/net/trace/events.go b/Godeps/_workspace/src/golang.org/x/net/trace/events.go
new file mode 100644
index 00000000000..e66c7e32828
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/net/trace/events.go
@@ -0,0 +1,524 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package trace

import (
	"bytes"
	"fmt"
	"html/template"
	"io"
	"log"
	"net/http"
	"runtime"
	"sort"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"text/tabwriter"
	"time"
)

// eventsTmpl renders the /debug/events page from eventsHTML.
var eventsTmpl = template.Must(template.New("events").Funcs(template.FuncMap{
	"elapsed":   elapsed,
	"trimSpace": strings.TrimSpace,
}).Parse(eventsHTML))

const maxEventsPerLog = 100

// A bucket selects event logs by the age of their most recent error.
type bucket struct {
	MaxErrAge time.Duration // include logs whose last error is newer than this (0 = all logs)
	String    string        // label shown in the /debug/events UI
}

var buckets = []bucket{
	{0, "total"},
	{10 * time.Second, "errs<10s"},
	{1 * time.Minute, "errs<1m"},
	{10 * time.Minute, "errs<10m"},
	{1 * time.Hour, "errs<1h"},
	{10 * time.Hour, "errs<10h"},
	{24000 * time.Hour, "errors"},
}

// RenderEvents renders the HTML page typically served at /debug/events.
// It does not do any auth checking; see AuthRequest for the default auth check
// used by the handler registered on http.DefaultServeMux.
// req may be nil.
+func RenderEvents(w http.ResponseWriter, req *http.Request, sensitive bool) { + now := time.Now() + data := &struct { + Families []string // family names + Buckets []bucket + Counts [][]int // eventLog count per family/bucket + + // Set when a bucket has been selected. + Family string + Bucket int + EventLogs eventLogs + Expanded bool + }{ + Buckets: buckets, + } + + data.Families = make([]string, 0, len(families)) + famMu.RLock() + for name := range families { + data.Families = append(data.Families, name) + } + famMu.RUnlock() + sort.Strings(data.Families) + + // Count the number of eventLogs in each family for each error age. + data.Counts = make([][]int, len(data.Families)) + for i, name := range data.Families { + // TODO(sameer): move this loop under the family lock. + f := getEventFamily(name) + data.Counts[i] = make([]int, len(data.Buckets)) + for j, b := range data.Buckets { + data.Counts[i][j] = f.Count(now, b.MaxErrAge) + } + } + + if req != nil { + var ok bool + data.Family, data.Bucket, ok = parseEventsArgs(req) + if !ok { + // No-op + } else { + data.EventLogs = getEventFamily(data.Family).Copy(now, buckets[data.Bucket].MaxErrAge) + } + if data.EventLogs != nil { + defer data.EventLogs.Free() + sort.Sort(data.EventLogs) + } + if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil { + data.Expanded = exp + } + } + + famMu.RLock() + defer famMu.RUnlock() + if err := eventsTmpl.Execute(w, data); err != nil { + log.Printf("net/trace: Failed executing template: %v", err) + } +} + +func parseEventsArgs(req *http.Request) (fam string, b int, ok bool) { + fam, bStr := req.FormValue("fam"), req.FormValue("b") + if fam == "" || bStr == "" { + return "", 0, false + } + b, err := strconv.Atoi(bStr) + if err != nil || b < 0 || b >= len(buckets) { + return "", 0, false + } + return fam, b, true +} + +// An EventLog provides a log of events associated with a specific object. 
type EventLog interface {
	// Printf formats its arguments with fmt.Sprintf and adds the
	// result to the event log.
	Printf(format string, a ...interface{})

	// Errorf is like Printf, but it marks this event as an error.
	Errorf(format string, a ...interface{})

	// Finish declares that this event log is complete.
	// The event log should not be used after calling this method.
	Finish()
}

// NewEventLog returns a new EventLog with the specified family name
// and title.
func NewEventLog(family, title string) EventLog {
	el := newEventLog()
	el.ref()
	el.Family, el.Title = family, title
	el.Start = time.Now()
	el.events = make([]logEntry, 0, maxEventsPerLog)
	// Capture the creation call stack (skipping Callers and NewEventLog).
	el.stack = make([]uintptr, 32)
	n := runtime.Callers(2, el.stack)
	el.stack = el.stack[:n]

	getEventFamily(family).add(el)
	return el
}

func (el *eventLog) Finish() {
	getEventFamily(el.Family).remove(el)
	el.unref() // matches ref in New
}

var (
	famMu    sync.RWMutex
	families = make(map[string]*eventFamily) // family name => family
)

// getEventFamily returns the family with the given name, creating it on
// first use.
func getEventFamily(fam string) *eventFamily {
	famMu.Lock()
	defer famMu.Unlock()
	f := families[fam]
	if f == nil {
		f = &eventFamily{}
		families[fam] = f
	}
	return f
}

type eventFamily struct {
	mu        sync.RWMutex
	eventLogs eventLogs
}

func (f *eventFamily) add(el *eventLog) {
	f.mu.Lock()
	f.eventLogs = append(f.eventLogs, el)
	f.mu.Unlock()
}

func (f *eventFamily) remove(el *eventLog) {
	f.mu.Lock()
	defer f.mu.Unlock()
	for i, el0 := range f.eventLogs {
		if el == el0 {
			copy(f.eventLogs[i:], f.eventLogs[i+1:])
			f.eventLogs = f.eventLogs[:len(f.eventLogs)-1]
			return
		}
	}
}

// Count reports how many of the family's logs have an error newer than
// maxErrAge (all of them when maxErrAge is 0).
func (f *eventFamily) Count(now time.Time, maxErrAge time.Duration) (n int) {
	f.mu.RLock()
	defer f.mu.RUnlock()
	for _, el := range f.eventLogs {
		if el.hasRecentError(now, maxErrAge) {
			n++
		}
	}
	return
}

// Copy returns the matching logs with an extra reference each; the caller
// must release them via eventLogs.Free.
func (f *eventFamily) Copy(now time.Time, maxErrAge time.Duration) (els eventLogs) {
	f.mu.RLock()
	defer f.mu.RUnlock()
	els = make(eventLogs, 0, len(f.eventLogs))
	for _, el := range f.eventLogs {
		if el.hasRecentError(now, maxErrAge) {
			el.ref()
			els = append(els, el)
		}
	}
	return
}

type eventLogs []*eventLog

// Free calls unref on each element of the list.
func (els eventLogs) Free() {
	for _, el := range els {
		el.unref()
	}
}

// eventLogs may be sorted in reverse chronological order.
func (els eventLogs) Len() int           { return len(els) }
func (els eventLogs) Less(i, j int) bool { return els[i].Start.After(els[j].Start) }
func (els eventLogs) Swap(i, j int)      { els[i], els[j] = els[j], els[i] }

// A logEntry is a timestamped log entry in an event log.
type logEntry struct {
	When    time.Time
	Elapsed time.Duration // since previous event in log
	NewDay  bool          // whether this event is on a different day to the previous event
	What    string
	IsErr   bool
}

// WhenString returns a string representation of the elapsed time of the event.
// It will include the date if midnight was crossed.
func (e logEntry) WhenString() string {
	if e.NewDay {
		return e.When.Format("2006/01/02 15:04:05.000000")
	}
	return e.When.Format("15:04:05.000000")
}

// An eventLog represents an active event log.
type eventLog struct {
	// Family is the top-level grouping of event logs to which this belongs.
	Family string

	// Title is the title of this event log.
	Title string

	// Timing information.
	Start time.Time

	// Call stack where this event log was created.
	stack []uintptr

	// Append-only sequence of events.
	//
	// TODO(sameer): change this to a ring buffer to avoid the array copy
	// when we hit maxEventsPerLog.
	mu            sync.RWMutex
	events        []logEntry
	LastErrorTime time.Time
	discarded     int

	refs int32 // how many buckets this is in
}

func (el *eventLog) reset() {
	// Clear all but the mutex. Mutexes may not be copied, even when unlocked.
	el.Family = ""
	el.Title = ""
	el.Start = time.Time{}
	el.stack = nil
	el.events = nil
	el.LastErrorTime = time.Time{}
	el.discarded = 0
	el.refs = 0
}

func (el *eventLog) hasRecentError(now time.Time, maxErrAge time.Duration) bool {
	// maxErrAge == 0 means "no age filter": every log matches.
	if maxErrAge == 0 {
		return true
	}
	el.mu.RLock()
	defer el.mu.RUnlock()
	return now.Sub(el.LastErrorTime) < maxErrAge
}

// delta returns the elapsed time since the last event or the log start,
// and whether it spans midnight.
// L >= el.mu
func (el *eventLog) delta(t time.Time) (time.Duration, bool) {
	if len(el.events) == 0 {
		return t.Sub(el.Start), false
	}
	prev := el.events[len(el.events)-1].When
	return t.Sub(prev), prev.Day() != t.Day()

}

func (el *eventLog) Printf(format string, a ...interface{}) {
	el.printf(false, format, a...)
}

func (el *eventLog) Errorf(format string, a ...interface{}) {
	el.printf(true, format, a...)
}

// printf appends a formatted entry, evicting the oldest real event once
// the log holds maxEventsPerLog entries.
func (el *eventLog) printf(isErr bool, format string, a ...interface{}) {
	e := logEntry{When: time.Now(), IsErr: isErr, What: fmt.Sprintf(format, a...)}
	el.mu.Lock()
	e.Elapsed, e.NewDay = el.delta(e.When)
	if len(el.events) < maxEventsPerLog {
		el.events = append(el.events, e)
	} else {
		// Discard the oldest event.
		if el.discarded == 0 {
			// el.discarded starts at two to count for the event it
			// is replacing, plus the next one that we are about to
			// drop.
			el.discarded = 2
		} else {
			el.discarded++
		}
		// TODO(sameer): if this causes allocations on a critical path,
		// change eventLog.What to be a fmt.Stringer, as in trace.go.
		el.events[0].What = fmt.Sprintf("(%d events discarded)", el.discarded)
		// The timestamp of the discarded meta-event should be
		// the time of the last event it is representing.
+ el.events[0].When = el.events[1].When + copy(el.events[1:], el.events[2:]) + el.events[maxEventsPerLog-1] = e + } + if e.IsErr { + el.LastErrorTime = e.When + } + el.mu.Unlock() +} + +func (el *eventLog) ref() { + atomic.AddInt32(&el.refs, 1) +} + +func (el *eventLog) unref() { + if atomic.AddInt32(&el.refs, -1) == 0 { + freeEventLog(el) + } +} + +func (el *eventLog) When() string { + return el.Start.Format("2006/01/02 15:04:05.000000") +} + +func (el *eventLog) ElapsedTime() string { + elapsed := time.Since(el.Start) + return fmt.Sprintf("%.6f", elapsed.Seconds()) +} + +func (el *eventLog) Stack() string { + buf := new(bytes.Buffer) + tw := tabwriter.NewWriter(buf, 1, 8, 1, '\t', 0) + printStackRecord(tw, el.stack) + tw.Flush() + return buf.String() +} + +// printStackRecord prints the function + source line information +// for a single stack trace. +// Adapted from runtime/pprof/pprof.go. +func printStackRecord(w io.Writer, stk []uintptr) { + for _, pc := range stk { + f := runtime.FuncForPC(pc) + if f == nil { + continue + } + file, line := f.FileLine(pc) + name := f.Name() + // Hide runtime.goexit and any runtime functions at the beginning. + if strings.HasPrefix(name, "runtime.") { + continue + } + fmt.Fprintf(w, "# %s\t%s:%d\n", name, file, line) + } +} + +func (el *eventLog) Events() []logEntry { + el.mu.RLock() + defer el.mu.RUnlock() + return el.events +} + +// freeEventLogs is a freelist of *eventLog +var freeEventLogs = make(chan *eventLog, 1000) + +// newEventLog returns a event log ready to use. +func newEventLog() *eventLog { + select { + case el := <-freeEventLogs: + return el + default: + return new(eventLog) + } +} + +// freeEventLog adds el to freeEventLogs if there's room. +// This is non-blocking. +func freeEventLog(el *eventLog) { + el.reset() + select { + case freeEventLogs <- el: + default: + } +} + +const eventsHTML = ` + + + events + + + + +

/debug/events

+ + + {{range $i, $fam := .Families}} + + + + {{range $j, $bucket := $.Buckets}} + {{$n := index $.Counts $i $j}} + + {{end}} + + {{end}} +
{{$fam}} + {{if $n}}{{end}} + [{{$n}} {{$bucket.String}}] + {{if $n}}{{end}} +
+ +{{if $.EventLogs}} +
+

Family: {{$.Family}}

+ +{{if $.Expanded}}{{end}} +[Summary]{{if $.Expanded}}{{end}} + +{{if not $.Expanded}}{{end}} +[Expanded]{{if not $.Expanded}}{{end}} + + + + {{range $el := $.EventLogs}} + + + + + {{if $.Expanded}} + + + + + + {{range $el.Events}} + + + + + + {{end}} + {{end}} + {{end}} +
WhenElapsed
{{$el.When}}{{$el.ElapsedTime}}{{$el.Title}} +
{{$el.Stack|trimSpace}}
{{.WhenString}}{{elapsed .Elapsed}}.{{if .IsErr}}E{{else}}.{{end}}. {{.What}}
+{{end}} + + +` diff --git a/Godeps/_workspace/src/golang.org/x/net/trace/histogram.go b/Godeps/_workspace/src/golang.org/x/net/trace/histogram.go new file mode 100644 index 00000000000..bb42aa5320d --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/net/trace/histogram.go @@ -0,0 +1,356 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package trace + +// This file implements histogramming for RPC statistics collection. + +import ( + "bytes" + "fmt" + "html/template" + "log" + "math" + + "golang.org/x/net/internal/timeseries" +) + +const ( + bucketCount = 38 +) + +// histogram keeps counts of values in buckets that are spaced +// out in powers of 2: 0-1, 2-3, 4-7... +// histogram implements timeseries.Observable +type histogram struct { + sum int64 // running total of measurements + sumOfSquares float64 // square of running total + buckets []int64 // bucketed values for histogram + value int // holds a single value as an optimization + valueCount int64 // number of values recorded for single value +} + +// AddMeasurement records a value measurement observation to the histogram. 
func (h *histogram) addMeasurement(value int64) {
	// TODO: assert invariant
	h.sum += value
	h.sumOfSquares += float64(value) * float64(value)

	bucketIndex := getBucket(value)

	// Fast path: while every recorded value has landed in the same bucket,
	// track just that bucket index and a count instead of allocating the
	// full bucket slice. Note that h.value stores the bucket index, not
	// the raw measurement.
	if h.valueCount == 0 || (h.valueCount > 0 && h.value == bucketIndex) {
		h.value = bucketIndex
		h.valueCount++
	} else {
		h.allocateBuckets()
		h.buckets[bucketIndex]++
	}
}

// allocateBuckets switches the histogram from single-value mode to
// bucketed mode, moving the tracked count into its bucket.
// valueCount == -1 is the sentinel for "buckets are in use".
func (h *histogram) allocateBuckets() {
	if h.buckets == nil {
		h.buckets = make([]int64, bucketCount)
		h.buckets[h.value] = h.valueCount
		h.value = 0
		h.valueCount = -1
	}
}

// log2 returns the number of bits needed to represent i, i.e.
// floor(log2(i))+1 for i > 0 and 0 for i == 0.
func log2(i int64) int {
	n := 0
	for ; i >= 0x100; i >>= 8 {
		n += 8
	}
	for ; i > 0; i >>= 1 {
		n += 1
	}
	return n
}

// getBucket maps a measurement to its bucket index: the bit length minus
// one, clamped to [0, bucketCount).
func getBucket(i int64) (index int) {
	index = log2(i) - 1
	if index < 0 {
		index = 0
	}
	if index >= bucketCount {
		index = bucketCount - 1
	}
	return
}

// Total returns the number of recorded observations.
func (h *histogram) total() (total int64) {
	// In single-value mode valueCount holds the count; in bucketed mode it
	// is -1 and the buckets hold everything.
	if h.valueCount >= 0 {
		total = h.valueCount
	}
	for _, val := range h.buckets {
		total += int64(val)
	}
	return
}

// Average returns the average value of recorded observations.
func (h *histogram) average() float64 {
	t := h.total()
	if t == 0 {
		return 0
	}
	return float64(h.sum) / float64(t)
}

// Variance returns the variance of recorded observations.
func (h *histogram) variance() float64 {
	t := float64(h.total())
	if t == 0 {
		return 0
	}
	s := float64(h.sum) / t
	// E[X^2] - (E[X])^2.
	return h.sumOfSquares/t - s*s
}

// StandardDeviation returns the standard deviation of recorded observations.
func (h *histogram) standardDeviation() float64 {
	return math.Sqrt(h.variance())
}

// PercentileBoundary estimates the value that the given fraction of recorded
// observations are less than.
func (h *histogram) percentileBoundary(percentile float64) int64 {
	total := h.total()

	// Corner cases (make sure result is strictly less than Total())
	if total == 0 {
		return 0
	} else if total == 1 {
		return int64(h.average())
	}

	// In single-value mode h.buckets is nil, so the loop below is skipped
	// and the final bucketBoundary is returned.
	percentOfTotal := round(float64(total) * percentile)
	var runningTotal int64

	for i := range h.buckets {
		value := h.buckets[i]
		runningTotal += value
		if runningTotal == percentOfTotal {
			// We hit an exact bucket boundary. If the next bucket has data, it is a
			// good estimate of the value. If the bucket is empty, we interpolate the
			// midpoint between the next bucket's boundary and the next non-zero
			// bucket. If the remaining buckets are all empty, then we use the
			// boundary for the next bucket as the estimate.
			j := uint8(i + 1)
			min := bucketBoundary(j)
			if runningTotal < total {
				// runningTotal < total guarantees a non-zero bucket lies
				// ahead, so this scan terminates within the slice.
				for h.buckets[j] == 0 {
					j++
				}
			}
			max := bucketBoundary(j)
			return min + round(float64(max-min)/2)
		} else if runningTotal > percentOfTotal {
			// The value is in this bucket. Interpolate the value.
			delta := runningTotal - percentOfTotal
			percentBucket := float64(value-delta) / float64(value)
			bucketMin := bucketBoundary(uint8(i))
			nextBucketMin := bucketBoundary(uint8(i + 1))
			bucketSize := nextBucketMin - bucketMin
			return bucketMin + round(percentBucket*float64(bucketSize))
		}
	}
	return bucketBoundary(bucketCount - 1)
}

// Median returns the estimated median of the observed values.
func (h *histogram) median() int64 {
	return h.percentileBoundary(0.5)
}

// Add adds other to h.
func (h *histogram) Add(other timeseries.Observable) {
	o := other.(*histogram)
	if o.valueCount == 0 {
		// Other histogram is empty
	} else if h.valueCount >= 0 && o.valueCount > 0 && h.value == o.value {
		// Both have a single bucketed value, aggregate them
		h.valueCount += o.valueCount
	} else {
		// Two different values necessitate buckets in this histogram
		h.allocateBuckets()
		if o.valueCount >= 0 {
			// other is still in single-value mode: add its count to the
			// matching bucket.
			h.buckets[o.value] += o.valueCount
		} else {
			for i := range h.buckets {
				h.buckets[i] += o.buckets[i]
			}
		}
	}
	h.sumOfSquares += o.sumOfSquares
	h.sum += o.sum
}

// Clear resets the histogram to an empty state, removing all observed values.
func (h *histogram) Clear() {
	h.buckets = nil
	h.value = 0
	h.valueCount = 0
	h.sum = 0
	h.sumOfSquares = 0
}

// CopyFrom copies from other, which must be a *histogram, into h.
func (h *histogram) CopyFrom(other timeseries.Observable) {
	o := other.(*histogram)
	if o.valueCount == -1 {
		h.allocateBuckets()
		copy(h.buckets, o.buckets)
	}
	// NOTE(review): when h is already bucketed but other is in
	// single-value mode, h.buckets is left untouched while valueCount
	// becomes >= 0, so total() would count both — presumably callers only
	// CopyFrom into fresh or cleared histograms. TODO confirm.
	h.sum = o.sum
	h.sumOfSquares = o.sumOfSquares
	h.value = o.value
	h.valueCount = o.valueCount
}

// Multiply scales the histogram by the specified ratio.
func (h *histogram) Multiply(ratio float64) {
	// Counts are truncated toward zero by the float64->int64 conversion.
	if h.valueCount == -1 {
		for i := range h.buckets {
			h.buckets[i] = int64(float64(h.buckets[i]) * ratio)
		}
	} else {
		h.valueCount = int64(float64(h.valueCount) * ratio)
	}
	h.sum = int64(float64(h.sum) * ratio)
	h.sumOfSquares = h.sumOfSquares * ratio
}

// New creates a new histogram.
func (h *histogram) New() timeseries.Observable {
	r := new(histogram)
	r.Clear()
	return r
}

func (h *histogram) String() string {
	return fmt.Sprintf("%d, %f, %d, %d, %v",
		h.sum, h.sumOfSquares, h.value, h.valueCount, h.buckets)
}

// round returns the closest int64 to the argument
func round(in float64) int64 {
	return int64(math.Floor(in + 0.5))
}

// bucketBoundary returns the first value in the bucket.
+func bucketBoundary(bucket uint8) int64 { + if bucket == 0 { + return 0 + } + return 1 << bucket +} + +// bucketData holds data about a specific bucket for use in distTmpl. +type bucketData struct { + Lower, Upper int64 + N int64 + Pct, CumulativePct float64 + GraphWidth int +} + +// data holds data about a Distribution for use in distTmpl. +type data struct { + Buckets []*bucketData + Count, Median int64 + Mean, StandardDeviation float64 +} + +// maxHTMLBarWidth is the maximum width of the HTML bar for visualizing buckets. +const maxHTMLBarWidth = 350.0 + +// newData returns data representing h for use in distTmpl. +func (h *histogram) newData() *data { + // Force the allocation of buckets to simplify the rendering implementation + h.allocateBuckets() + // We scale the bars on the right so that the largest bar is + // maxHTMLBarWidth pixels in width. + maxBucket := int64(0) + for _, n := range h.buckets { + if n > maxBucket { + maxBucket = n + } + } + total := h.total() + barsizeMult := maxHTMLBarWidth / float64(maxBucket) + var pctMult float64 + if total == 0 { + pctMult = 1.0 + } else { + pctMult = 100.0 / float64(total) + } + + buckets := make([]*bucketData, len(h.buckets)) + runningTotal := int64(0) + for i, n := range h.buckets { + if n == 0 { + continue + } + runningTotal += n + var upperBound int64 + if i < bucketCount-1 { + upperBound = bucketBoundary(uint8(i + 1)) + } else { + upperBound = math.MaxInt64 + } + buckets[i] = &bucketData{ + Lower: bucketBoundary(uint8(i)), + Upper: upperBound, + N: n, + Pct: float64(n) * pctMult, + CumulativePct: float64(runningTotal) * pctMult, + GraphWidth: int(float64(n) * barsizeMult), + } + } + return &data{ + Buckets: buckets, + Count: total, + Median: h.median(), + Mean: h.average(), + StandardDeviation: h.standardDeviation(), + } +} + +func (h *histogram) html() template.HTML { + buf := new(bytes.Buffer) + if err := distTmpl.Execute(buf, h.newData()); err != nil { + buf.Reset() + log.Printf("net/trace: couldn't 
execute template: %v", err) + } + return template.HTML(buf.String()) +} + +// Input: data +var distTmpl = template.Must(template.New("distTmpl").Parse(` + + + + + + + +
Count: {{.Count}}Mean: {{printf "%.0f" .Mean}}StdDev: {{printf "%.0f" .StandardDeviation}}Median: {{.Median}}
+
+ +{{range $b := .Buckets}} +{{if $b}} + + + + + + + + + +{{end}} +{{end}} +
[{{.Lower}},{{.Upper}}){{.N}}{{printf "%#.3f" .Pct}}%{{printf "%#.3f" .CumulativePct}}%
+`)) diff --git a/Godeps/_workspace/src/golang.org/x/net/trace/trace.go b/Godeps/_workspace/src/golang.org/x/net/trace/trace.go new file mode 100644 index 00000000000..c87290b76eb --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/net/trace/trace.go @@ -0,0 +1,1057 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package trace implements tracing of requests and long-lived objects. +It exports HTTP interfaces on /debug/requests and /debug/events. + +A trace.Trace provides tracing for short-lived objects, usually requests. +A request handler might be implemented like this: + + func fooHandler(w http.ResponseWriter, req *http.Request) { + tr := trace.New("mypkg.Foo", req.URL.Path) + defer tr.Finish() + ... + tr.LazyPrintf("some event %q happened", str) + ... + if err := somethingImportant(); err != nil { + tr.LazyPrintf("somethingImportant failed: %v", err) + tr.SetError() + } + } + +The /debug/requests HTTP endpoint organizes the traces by family, +errors, and duration. It also provides histogram of request duration +for each family. + +A trace.EventLog provides tracing for long-lived objects, such as RPC +connections. + + // A Fetcher fetches URL paths for a single domain. + type Fetcher struct { + domain string + events trace.EventLog + } + + func NewFetcher(domain string) *Fetcher { + return &Fetcher{ + domain, + trace.NewEventLog("mypkg.Fetcher", domain), + } + } + + func (f *Fetcher) Fetch(path string) (string, error) { + resp, err := http.Get("http://" + f.domain + "/" + path) + if err != nil { + f.events.Errorf("Get(%q) = %v", path, err) + return "", err + } + f.events.Printf("Get(%q) = %s", path, resp.Status) + ... + } + + func (f *Fetcher) Close() error { + f.events.Finish() + return nil + } + +The /debug/events HTTP endpoint organizes the event logs by family and +by time since the last error. 
The expanded view displays recent log +entries and the log's call stack. +*/ +package trace + +import ( + "bytes" + "fmt" + "html/template" + "io" + "log" + "net" + "net/http" + "runtime" + "sort" + "strconv" + "sync" + "sync/atomic" + "time" + + "golang.org/x/net/context" + "golang.org/x/net/internal/timeseries" +) + +// DebugUseAfterFinish controls whether to debug uses of Trace values after finishing. +// FOR DEBUGGING ONLY. This will slow down the program. +var DebugUseAfterFinish = false + +// AuthRequest determines whether a specific request is permitted to load the +// /debug/requests or /debug/events pages. +// +// It returns two bools; the first indicates whether the page may be viewed at all, +// and the second indicates whether sensitive events will be shown. +// +// AuthRequest may be replaced by a program to customise its authorisation requirements. +// +// The default AuthRequest function returns (true, true) iff the request comes from localhost/127.0.0.1/[::1]. +var AuthRequest = func(req *http.Request) (any, sensitive bool) { + host, _, err := net.SplitHostPort(req.RemoteAddr) + switch { + case err != nil: // Badly formed address; fail closed. + return false, false + case host == "localhost" || host == "127.0.0.1" || host == "::1": + return true, true + default: + return false, false + } +} + +func init() { + http.HandleFunc("/debug/requests", func(w http.ResponseWriter, req *http.Request) { + any, sensitive := AuthRequest(req) + if !any { + http.Error(w, "not allowed", http.StatusUnauthorized) + return + } + Render(w, req, sensitive) + }) + http.HandleFunc("/debug/events", func(w http.ResponseWriter, req *http.Request) { + any, sensitive := AuthRequest(req) + if !any { + http.Error(w, "not allowed", http.StatusUnauthorized) + return + } + RenderEvents(w, req, sensitive) + }) +} + +// Render renders the HTML page typically served at /debug/requests. 
+// It does not do any auth checking; see AuthRequest for the default auth check +// used by the handler registered on http.DefaultServeMux. +// req may be nil. +func Render(w io.Writer, req *http.Request, sensitive bool) { + data := &struct { + Families []string + ActiveTraceCount map[string]int + CompletedTraces map[string]*family + + // Set when a bucket has been selected. + Traces traceList + Family string + Bucket int + Expanded bool + Traced bool + Active bool + ShowSensitive bool // whether to show sensitive events + + Histogram template.HTML + HistogramWindow string // e.g. "last minute", "last hour", "all time" + + // If non-zero, the set of traces is a partial set, + // and this is the total number. + Total int + }{ + CompletedTraces: completedTraces, + } + + data.ShowSensitive = sensitive + if req != nil { + // Allow show_sensitive=0 to force hiding of sensitive data for testing. + // This only goes one way; you can't use show_sensitive=1 to see things. + if req.FormValue("show_sensitive") == "0" { + data.ShowSensitive = false + } + + if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil { + data.Expanded = exp + } + if exp, err := strconv.ParseBool(req.FormValue("rtraced")); err == nil { + data.Traced = exp + } + } + + completedMu.RLock() + data.Families = make([]string, 0, len(completedTraces)) + for fam, _ := range completedTraces { + data.Families = append(data.Families, fam) + } + completedMu.RUnlock() + sort.Strings(data.Families) + + // We are careful here to minimize the time spent locking activeMu, + // since that lock is required every time an RPC starts and finishes. 
+ data.ActiveTraceCount = make(map[string]int, len(data.Families)) + activeMu.RLock() + for fam, s := range activeTraces { + data.ActiveTraceCount[fam] = s.Len() + } + activeMu.RUnlock() + + var ok bool + data.Family, data.Bucket, ok = parseArgs(req) + switch { + case !ok: + // No-op + case data.Bucket == -1: + data.Active = true + n := data.ActiveTraceCount[data.Family] + data.Traces = getActiveTraces(data.Family) + if len(data.Traces) < n { + data.Total = n + } + case data.Bucket < bucketsPerFamily: + if b := lookupBucket(data.Family, data.Bucket); b != nil { + data.Traces = b.Copy(data.Traced) + } + default: + if f := getFamily(data.Family, false); f != nil { + var obs timeseries.Observable + f.LatencyMu.RLock() + switch o := data.Bucket - bucketsPerFamily; o { + case 0: + obs = f.Latency.Minute() + data.HistogramWindow = "last minute" + case 1: + obs = f.Latency.Hour() + data.HistogramWindow = "last hour" + case 2: + obs = f.Latency.Total() + data.HistogramWindow = "all time" + } + f.LatencyMu.RUnlock() + if obs != nil { + data.Histogram = obs.(*histogram).html() + } + } + } + + if data.Traces != nil { + defer data.Traces.Free() + sort.Sort(data.Traces) + } + + completedMu.RLock() + defer completedMu.RUnlock() + if err := pageTmpl.ExecuteTemplate(w, "Page", data); err != nil { + log.Printf("net/trace: Failed executing template: %v", err) + } +} + +func parseArgs(req *http.Request) (fam string, b int, ok bool) { + if req == nil { + return "", 0, false + } + fam, bStr := req.FormValue("fam"), req.FormValue("b") + if fam == "" || bStr == "" { + return "", 0, false + } + b, err := strconv.Atoi(bStr) + if err != nil || b < -1 { + return "", 0, false + } + + return fam, b, true +} + +func lookupBucket(fam string, b int) *traceBucket { + f := getFamily(fam, false) + if f == nil || b < 0 || b >= len(f.Buckets) { + return nil + } + return f.Buckets[b] +} + +type contextKeyT string + +var contextKey = contextKeyT("golang.org/x/net/trace.Trace") + +// NewContext returns a 
copy of the parent context +// and associates it with a Trace. +func NewContext(ctx context.Context, tr Trace) context.Context { + return context.WithValue(ctx, contextKey, tr) +} + +// FromContext returns the Trace bound to the context, if any. +func FromContext(ctx context.Context) (tr Trace, ok bool) { + tr, ok = ctx.Value(contextKey).(Trace) + return +} + +// Trace represents an active request. +type Trace interface { + // LazyLog adds x to the event log. It will be evaluated each time the + // /debug/requests page is rendered. Any memory referenced by x will be + // pinned until the trace is finished and later discarded. + LazyLog(x fmt.Stringer, sensitive bool) + + // LazyPrintf evaluates its arguments with fmt.Sprintf each time the + // /debug/requests page is rendered. Any memory referenced by a will be + // pinned until the trace is finished and later discarded. + LazyPrintf(format string, a ...interface{}) + + // SetError declares that this trace resulted in an error. + SetError() + + // SetRecycler sets a recycler for the trace. + // f will be called for each event passed to LazyLog at a time when + // it is no longer required, whether while the trace is still active + // and the event is discarded, or when a completed trace is discarded. + SetRecycler(f func(interface{})) + + // SetTraceInfo sets the trace info for the trace. + // This is currently unused. + SetTraceInfo(traceID, spanID uint64) + + // SetMaxEvents sets the maximum number of events that will be stored + // in the trace. This has no effect if any events have already been + // added to the trace. + SetMaxEvents(m int) + + // Finish declares that this trace is complete. + // The trace should not be used after calling this method. + Finish() +} + +type lazySprintf struct { + format string + a []interface{} +} + +func (l *lazySprintf) String() string { + return fmt.Sprintf(l.format, l.a...) +} + +// New returns a new Trace with the specified family and title. 
+func New(family, title string) Trace { + tr := newTrace() + tr.ref() + tr.Family, tr.Title = family, title + tr.Start = time.Now() + tr.events = make([]event, 0, maxEventsPerTrace) + + activeMu.RLock() + s := activeTraces[tr.Family] + activeMu.RUnlock() + if s == nil { + activeMu.Lock() + s = activeTraces[tr.Family] // check again + if s == nil { + s = new(traceSet) + activeTraces[tr.Family] = s + } + activeMu.Unlock() + } + s.Add(tr) + + // Trigger allocation of the completed trace structure for this family. + // This will cause the family to be present in the request page during + // the first trace of this family. We don't care about the return value, + // nor is there any need for this to run inline, so we execute it in its + // own goroutine, but only if the family isn't allocated yet. + completedMu.RLock() + if _, ok := completedTraces[tr.Family]; !ok { + go allocFamily(tr.Family) + } + completedMu.RUnlock() + + return tr +} + +func (tr *trace) Finish() { + tr.Elapsed = time.Now().Sub(tr.Start) + if DebugUseAfterFinish { + buf := make([]byte, 4<<10) // 4 KB should be enough + n := runtime.Stack(buf, false) + tr.finishStack = buf[:n] + } + + activeMu.RLock() + m := activeTraces[tr.Family] + activeMu.RUnlock() + m.Remove(tr) + + f := getFamily(tr.Family, true) + for _, b := range f.Buckets { + if b.Cond.match(tr) { + b.Add(tr) + } + } + // Add a sample of elapsed time as microseconds to the family's timeseries + h := new(histogram) + h.addMeasurement(tr.Elapsed.Nanoseconds() / 1e3) + f.LatencyMu.Lock() + f.Latency.Add(h) + f.LatencyMu.Unlock() + + tr.unref() // matches ref in New +} + +const ( + bucketsPerFamily = 9 + tracesPerBucket = 10 + maxActiveTraces = 20 // Maximum number of active traces to show. + maxEventsPerTrace = 10 + numHistogramBuckets = 38 +) + +var ( + // The active traces. + activeMu sync.RWMutex + activeTraces = make(map[string]*traceSet) // family -> traces + + // Families of completed traces. 
+ completedMu sync.RWMutex + completedTraces = make(map[string]*family) // family -> traces +) + +type traceSet struct { + mu sync.RWMutex + m map[*trace]bool + + // We could avoid the entire map scan in FirstN by having a slice of all the traces + // ordered by start time, and an index into that from the trace struct, with a periodic + // repack of the slice after enough traces finish; we could also use a skip list or similar. + // However, that would shift some of the expense from /debug/requests time to RPC time, + // which is probably the wrong trade-off. +} + +func (ts *traceSet) Len() int { + ts.mu.RLock() + defer ts.mu.RUnlock() + return len(ts.m) +} + +func (ts *traceSet) Add(tr *trace) { + ts.mu.Lock() + if ts.m == nil { + ts.m = make(map[*trace]bool) + } + ts.m[tr] = true + ts.mu.Unlock() +} + +func (ts *traceSet) Remove(tr *trace) { + ts.mu.Lock() + delete(ts.m, tr) + ts.mu.Unlock() +} + +// FirstN returns the first n traces ordered by time. +func (ts *traceSet) FirstN(n int) traceList { + ts.mu.RLock() + defer ts.mu.RUnlock() + + if n > len(ts.m) { + n = len(ts.m) + } + trl := make(traceList, 0, n) + + // Fast path for when no selectivity is needed. + if n == len(ts.m) { + for tr := range ts.m { + tr.ref() + trl = append(trl, tr) + } + sort.Sort(trl) + return trl + } + + // Pick the oldest n traces. + // This is inefficient. See the comment in the traceSet struct. + for tr := range ts.m { + // Put the first n traces into trl in the order they occur. + // When we have n, sort trl, and thereafter maintain its order. + if len(trl) < n { + tr.ref() + trl = append(trl, tr) + if len(trl) == n { + // This is guaranteed to happen exactly once during this loop. + sort.Sort(trl) + } + continue + } + if tr.Start.After(trl[n-1].Start) { + continue + } + + // Find where to insert this one. 
+ tr.ref() + i := sort.Search(n, func(i int) bool { return trl[i].Start.After(tr.Start) }) + trl[n-1].unref() + copy(trl[i+1:], trl[i:]) + trl[i] = tr + } + + return trl +} + +func getActiveTraces(fam string) traceList { + activeMu.RLock() + s := activeTraces[fam] + activeMu.RUnlock() + if s == nil { + return nil + } + return s.FirstN(maxActiveTraces) +} + +func getFamily(fam string, allocNew bool) *family { + completedMu.RLock() + f := completedTraces[fam] + completedMu.RUnlock() + if f == nil && allocNew { + f = allocFamily(fam) + } + return f +} + +func allocFamily(fam string) *family { + completedMu.Lock() + defer completedMu.Unlock() + f := completedTraces[fam] + if f == nil { + f = newFamily() + completedTraces[fam] = f + } + return f +} + +// family represents a set of trace buckets and associated latency information. +type family struct { + // traces may occur in multiple buckets. + Buckets [bucketsPerFamily]*traceBucket + + // latency time series + LatencyMu sync.RWMutex + Latency *timeseries.MinuteHourSeries +} + +func newFamily() *family { + return &family{ + Buckets: [bucketsPerFamily]*traceBucket{ + {Cond: minCond(0)}, + {Cond: minCond(50 * time.Millisecond)}, + {Cond: minCond(100 * time.Millisecond)}, + {Cond: minCond(200 * time.Millisecond)}, + {Cond: minCond(500 * time.Millisecond)}, + {Cond: minCond(1 * time.Second)}, + {Cond: minCond(10 * time.Second)}, + {Cond: minCond(100 * time.Second)}, + {Cond: errorCond{}}, + }, + Latency: timeseries.NewMinuteHourSeries(func() timeseries.Observable { return new(histogram) }), + } +} + +// traceBucket represents a size-capped bucket of historic traces, +// along with a condition for a trace to belong to the bucket. +type traceBucket struct { + Cond cond + + // Ring buffer implementation of a fixed-size FIFO queue. 
+ mu sync.RWMutex + buf [tracesPerBucket]*trace + start int // < tracesPerBucket + length int // <= tracesPerBucket +} + +func (b *traceBucket) Add(tr *trace) { + b.mu.Lock() + defer b.mu.Unlock() + + i := b.start + b.length + if i >= tracesPerBucket { + i -= tracesPerBucket + } + if b.length == tracesPerBucket { + // "Remove" an element from the bucket. + b.buf[i].unref() + b.start++ + if b.start == tracesPerBucket { + b.start = 0 + } + } + b.buf[i] = tr + if b.length < tracesPerBucket { + b.length++ + } + tr.ref() +} + +// Copy returns a copy of the traces in the bucket. +// If tracedOnly is true, only the traces with trace information will be returned. +// The logs will be ref'd before returning; the caller should call +// the Free method when it is done with them. +// TODO(dsymonds): keep track of traced requests in separate buckets. +func (b *traceBucket) Copy(tracedOnly bool) traceList { + b.mu.RLock() + defer b.mu.RUnlock() + + trl := make(traceList, 0, b.length) + for i, x := 0, b.start; i < b.length; i++ { + tr := b.buf[x] + if !tracedOnly || tr.spanID != 0 { + tr.ref() + trl = append(trl, tr) + } + x++ + if x == b.length { + x = 0 + } + } + return trl +} + +func (b *traceBucket) Empty() bool { + b.mu.RLock() + defer b.mu.RUnlock() + return b.length == 0 +} + +// cond represents a condition on a trace. +type cond interface { + match(t *trace) bool + String() string +} + +type minCond time.Duration + +func (m minCond) match(t *trace) bool { return t.Elapsed >= time.Duration(m) } +func (m minCond) String() string { return fmt.Sprintf("≥%gs", time.Duration(m).Seconds()) } + +type errorCond struct{} + +func (e errorCond) match(t *trace) bool { return t.IsError } +func (e errorCond) String() string { return "errors" } + +type traceList []*trace + +// Free calls unref on each element of the list. +func (trl traceList) Free() { + for _, t := range trl { + t.unref() + } +} + +// traceList may be sorted in reverse chronological order. 
+func (trl traceList) Len() int { return len(trl) } +func (trl traceList) Less(i, j int) bool { return trl[i].Start.After(trl[j].Start) } +func (trl traceList) Swap(i, j int) { trl[i], trl[j] = trl[j], trl[i] } + +// An event is a timestamped log entry in a trace. +type event struct { + When time.Time + Elapsed time.Duration // since previous event in trace + NewDay bool // whether this event is on a different day to the previous event + Recyclable bool // whether this event was passed via LazyLog + What interface{} // string or fmt.Stringer + Sensitive bool // whether this event contains sensitive information +} + +// WhenString returns a string representation of the elapsed time of the event. +// It will include the date if midnight was crossed. +func (e event) WhenString() string { + if e.NewDay { + return e.When.Format("2006/01/02 15:04:05.000000") + } + return e.When.Format("15:04:05.000000") +} + +// discarded represents a number of discarded events. +// It is stored as *discarded to make it easier to update in-place. +type discarded int + +func (d *discarded) String() string { + return fmt.Sprintf("(%d events discarded)", int(*d)) +} + +// trace represents an active or complete request, +// either sent or received by this program. +type trace struct { + // Family is the top-level grouping of traces to which this belongs. + Family string + + // Title is the title of this trace. + Title string + + // Timing information. + Start time.Time + Elapsed time.Duration // zero while active + + // Trace information if non-zero. + traceID uint64 + spanID uint64 + + // Whether this trace resulted in an error. + IsError bool + + // Append-only sequence of events (modulo discards). 
+ mu sync.RWMutex + events []event + + refs int32 // how many buckets this is in + recycler func(interface{}) + disc discarded // scratch space to avoid allocation + + finishStack []byte // where finish was called, if DebugUseAfterFinish is set +} + +func (tr *trace) reset() { + // Clear all but the mutex. Mutexes may not be copied, even when unlocked. + tr.Family = "" + tr.Title = "" + tr.Start = time.Time{} + tr.Elapsed = 0 + tr.traceID = 0 + tr.spanID = 0 + tr.IsError = false + tr.events = nil + tr.refs = 0 + tr.recycler = nil + tr.disc = 0 + tr.finishStack = nil +} + +// delta returns the elapsed time since the last event or the trace start, +// and whether it spans midnight. +// L >= tr.mu +func (tr *trace) delta(t time.Time) (time.Duration, bool) { + if len(tr.events) == 0 { + return t.Sub(tr.Start), false + } + prev := tr.events[len(tr.events)-1].When + return t.Sub(prev), prev.Day() != t.Day() +} + +func (tr *trace) addEvent(x interface{}, recyclable, sensitive bool) { + if DebugUseAfterFinish && tr.finishStack != nil { + buf := make([]byte, 4<<10) // 4 KB should be enough + n := runtime.Stack(buf, false) + log.Printf("net/trace: trace used after finish:\nFinished at:\n%s\nUsed at:\n%s", tr.finishStack, buf[:n]) + } + + /* + NOTE TO DEBUGGERS + + If you are here because your program panicked in this code, + it is almost definitely the fault of code using this package, + and very unlikely to be the fault of this code. + + The most likely scenario is that some code elsewhere is using + a requestz.Trace after its Finish method is called. + You can temporarily set the DebugUseAfterFinish var + to help discover where that is; do not leave that var set, + since it makes this package much less efficient. 
+ */ + + e := event{When: time.Now(), What: x, Recyclable: recyclable, Sensitive: sensitive} + tr.mu.Lock() + e.Elapsed, e.NewDay = tr.delta(e.When) + if len(tr.events) < cap(tr.events) { + tr.events = append(tr.events, e) + } else { + // Discard the middle events. + di := int((cap(tr.events) - 1) / 2) + if d, ok := tr.events[di].What.(*discarded); ok { + (*d)++ + } else { + // disc starts at two to count for the event it is replacing, + // plus the next one that we are about to drop. + tr.disc = 2 + if tr.recycler != nil && tr.events[di].Recyclable { + go tr.recycler(tr.events[di].What) + } + tr.events[di].What = &tr.disc + } + // The timestamp of the discarded meta-event should be + // the time of the last event it is representing. + tr.events[di].When = tr.events[di+1].When + + if tr.recycler != nil && tr.events[di+1].Recyclable { + go tr.recycler(tr.events[di+1].What) + } + copy(tr.events[di+1:], tr.events[di+2:]) + tr.events[cap(tr.events)-1] = e + } + tr.mu.Unlock() +} + +func (tr *trace) LazyLog(x fmt.Stringer, sensitive bool) { + tr.addEvent(x, true, sensitive) +} + +func (tr *trace) LazyPrintf(format string, a ...interface{}) { + tr.addEvent(&lazySprintf{format, a}, false, false) +} + +func (tr *trace) SetError() { tr.IsError = true } + +func (tr *trace) SetRecycler(f func(interface{})) { + tr.recycler = f +} + +func (tr *trace) SetTraceInfo(traceID, spanID uint64) { + tr.traceID, tr.spanID = traceID, spanID +} + +func (tr *trace) SetMaxEvents(m int) { + // Always keep at least three events: first, discarded count, last. + if len(tr.events) == 0 && m > 3 { + tr.events = make([]event, 0, m) + } +} + +func (tr *trace) ref() { + atomic.AddInt32(&tr.refs, 1) +} + +func (tr *trace) unref() { + if atomic.AddInt32(&tr.refs, -1) == 0 { + if tr.recycler != nil { + // freeTrace clears tr, so we hold tr.recycler and tr.events here. 
+ go func(f func(interface{}), es []event) { + for _, e := range es { + if e.Recyclable { + f(e.What) + } + } + }(tr.recycler, tr.events) + } + + freeTrace(tr) + } +} + +func (tr *trace) When() string { + return tr.Start.Format("2006/01/02 15:04:05.000000") +} + +func (tr *trace) ElapsedTime() string { + t := tr.Elapsed + if t == 0 { + // Active trace. + t = time.Since(tr.Start) + } + return fmt.Sprintf("%.6f", t.Seconds()) +} + +func (tr *trace) Events() []event { + tr.mu.RLock() + defer tr.mu.RUnlock() + return tr.events +} + +var traceFreeList = make(chan *trace, 1000) // TODO(dsymonds): Use sync.Pool? + +// newTrace returns a trace ready to use. +func newTrace() *trace { + select { + case tr := <-traceFreeList: + return tr + default: + return new(trace) + } +} + +// freeTrace adds tr to traceFreeList if there's room. +// This is non-blocking. +func freeTrace(tr *trace) { + if DebugUseAfterFinish { + return // never reuse + } + tr.reset() + select { + case traceFreeList <- tr: + default: + } +} + +func elapsed(d time.Duration) string { + b := []byte(fmt.Sprintf("%.6f", d.Seconds())) + + // For subsecond durations, blank all zeros before decimal point, + // and all zeros between the decimal point and the first non-zero digit. + if d < time.Second { + dot := bytes.IndexByte(b, '.') + for i := 0; i < dot; i++ { + b[i] = ' ' + } + for i := dot + 1; i < len(b); i++ { + if b[i] == '0' { + b[i] = ' ' + } else { + break + } + } + } + + return string(b) +} + +var pageTmpl = template.Must(template.New("Page").Funcs(template.FuncMap{ + "elapsed": elapsed, + "add": func(a, b int) int { return a + b }, +}).Parse(pageHTML)) + +const pageHTML = ` +{{template "Prolog" .}} +{{template "StatusTable" .}} +{{template "Epilog" .}} + +{{define "Prolog"}} + + + /debug/requests + + + + +

/debug/requests

+{{end}} {{/* end of Prolog */}} + +{{define "StatusTable"}} + + {{range $fam := .Families}} + + + + {{$n := index $.ActiveTraceCount $fam}} + + + {{$f := index $.CompletedTraces $fam}} + {{range $i, $b := $f.Buckets}} + {{$empty := $b.Empty}} + + {{end}} + + {{$nb := len $f.Buckets}} + + + + + + {{end}} +
{{$fam}} + {{if $n}}{{end}} + [{{$n}} active] + {{if $n}}{{end}} + + {{if not $empty}}{{end}} + [{{.Cond}}] + {{if not $empty}}{{end}} + + [minute] + + [hour] + + [total] +
+{{end}} {{/* end of StatusTable */}} + +{{define "Epilog"}} +{{if $.Traces}} +
+

Family: {{$.Family}}

+ +{{if or $.Expanded $.Traced}} + [Normal/Summary] +{{else}} + [Normal/Summary] +{{end}} + +{{if or (not $.Expanded) $.Traced}} + [Normal/Expanded] +{{else}} + [Normal/Expanded] +{{end}} + +{{if not $.Active}} + {{if or $.Expanded (not $.Traced)}} + [Traced/Summary] + {{else}} + [Traced/Summary] + {{end}} + {{if or (not $.Expanded) (not $.Traced)}} + [Traced/Expanded] + {{else}} + [Traced/Expanded] + {{end}} +{{end}} + +{{if $.Total}} +

Showing {{len $.Traces}} of {{$.Total}} traces.

+{{end}} + + + + + {{range $tr := $.Traces}} + + + + + {{/* TODO: include traceID/spanID */}} + + {{if $.Expanded}} + {{range $tr.Events}} + + + + + + {{end}} + {{end}} + {{end}} +
+ {{if $.Active}}Active{{else}}Completed{{end}} Requests +
WhenElapsed (s)
{{$tr.When}}{{$tr.ElapsedTime}}{{$tr.Title}}
{{.WhenString}}{{elapsed .Elapsed}}{{if or $.ShowSensitive (not .Sensitive)}}... {{.What}}{{else}}[redacted]{{end}}
+{{end}} {{/* if $.Traces */}} + +{{if $.Histogram}} +

Latency (µs) of {{$.Family}} over {{$.HistogramWindow}}

+{{$.Histogram}} +{{end}} {{/* if $.Histogram */}} + + + +{{end}} {{/* end of Epilog */}} +` diff --git a/Godeps/_workspace/src/google.golang.org/grpc/.travis.yml b/Godeps/_workspace/src/google.golang.org/grpc/.travis.yml index 8e187926684..25035601429 100644 --- a/Godeps/_workspace/src/google.golang.org/grpc/.travis.yml +++ b/Godeps/_workspace/src/google.golang.org/grpc/.travis.yml @@ -1,10 +1,4 @@ -sudo: false - language: go -install: - - go get -v -t -d google.golang.org/grpc/... - script: - - go test -v -cpu 1,4 google.golang.org/grpc/... - - go test -v -race -cpu 1,4 google.golang.org/grpc/... + - make test testrace diff --git a/Godeps/_workspace/src/google.golang.org/grpc/CONTRIBUTING.md b/Godeps/_workspace/src/google.golang.org/grpc/CONTRIBUTING.md index b6decb6d8c8..407d384a7c8 100644 --- a/Godeps/_workspace/src/google.golang.org/grpc/CONTRIBUTING.md +++ b/Godeps/_workspace/src/google.golang.org/grpc/CONTRIBUTING.md @@ -20,8 +20,4 @@ When filing an issue, make sure to answer these five questions: 5. What did you see instead? ### Contributing code -Please read the Contribution Guidelines before sending patches. - -We will not accept GitHub pull requests once Gerrit is setup (we will use Gerrit instead for code review). - Unless otherwise noted, the Go source files are distributed under the BSD-style license found in the LICENSE file. diff --git a/Godeps/_workspace/src/google.golang.org/grpc/Makefile b/Godeps/_workspace/src/google.golang.org/grpc/Makefile new file mode 100644 index 00000000000..0dc225ff41e --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/Makefile @@ -0,0 +1,47 @@ +.PHONY: \ + all \ + deps \ + updatedeps \ + testdeps \ + updatetestdeps \ + build \ + proto \ + test \ + testrace \ + clean \ + +all: test testrace + +deps: + go get -d -v google.golang.org/grpc/... + +updatedeps: + go get -d -v -u -f google.golang.org/grpc/... + +testdeps: + go get -d -v -t google.golang.org/grpc/... 
+ +updatetestdeps: + go get -d -v -t -u -f google.golang.org/grpc/... + +build: deps + go build google.golang.org/grpc/... + +proto: + @ if ! which protoc > /dev/null; then \ + echo "error: protoc not installed" >&2; \ + exit 1; \ + fi + go get -v github.com/golang/protobuf/protoc-gen-go + for file in $$(git ls-files '*.proto'); do \ + protoc -I $$(dirname $$file) --go_out=plugins=grpc:$$(dirname $$file) $$file; \ + done + +test: testdeps + go test -v -cpu 1,4 google.golang.org/grpc/... + +testrace: testdeps + go test -v -race -cpu 1,4 google.golang.org/grpc/... + +clean: + go clean google.golang.org/grpc/... diff --git a/Godeps/_workspace/src/google.golang.org/grpc/README.md b/Godeps/_workspace/src/google.golang.org/grpc/README.md index caa40261f9f..f16d406a8eb 100644 --- a/Godeps/_workspace/src/google.golang.org/grpc/README.md +++ b/Godeps/_workspace/src/google.golang.org/grpc/README.md @@ -13,9 +13,14 @@ To install this package, you need to install Go 1.4 and setup your Go workspace $ go get google.golang.org/grpc ``` +Prerequisites +------------- + +This requires Go 1.4 or above. + Documentation ------------- -You can find more detailed documentation and examples in the [grpc-common repository](http://github.com/grpc/grpc-common). +You can find more detailed documentation and examples in the [examples directory](examples/). 
Status ------ diff --git a/Godeps/_workspace/src/google.golang.org/grpc/benchmark/benchmark.go b/Godeps/_workspace/src/google.golang.org/grpc/benchmark/benchmark.go index cda92a6d71d..7215d35a51c 100644 --- a/Godeps/_workspace/src/google.golang.org/grpc/benchmark/benchmark.go +++ b/Godeps/_workspace/src/google.golang.org/grpc/benchmark/benchmark.go @@ -40,13 +40,11 @@ import ( "io" "math" "net" - "time" - "github.com/golang/protobuf/proto" "golang.org/x/net/context" "google.golang.org/grpc" + testpb "google.golang.org/grpc/benchmark/grpc_testing" "google.golang.org/grpc/grpclog" - testpb "google.golang.org/grpc/interop/grpc_testing" ) func newPayload(t testpb.PayloadType, size int) *testpb.Payload { @@ -62,7 +60,7 @@ func newPayload(t testpb.PayloadType, size int) *testpb.Payload { grpclog.Fatalf("Unsupported payload type: %d", t) } return &testpb.Payload{ - Type: t.Enum(), + Type: t, Body: body, } } @@ -70,49 +68,13 @@ func newPayload(t testpb.PayloadType, size int) *testpb.Payload { type testServer struct { } -func (s *testServer) EmptyCall(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { - return new(testpb.Empty), nil -} - func (s *testServer) UnaryCall(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { return &testpb.SimpleResponse{ - Payload: newPayload(in.GetResponseType(), int(in.GetResponseSize())), + Payload: newPayload(in.ResponseType, int(in.ResponseSize)), }, nil } -func (s *testServer) StreamingOutputCall(args *testpb.StreamingOutputCallRequest, stream testpb.TestService_StreamingOutputCallServer) error { - cs := args.GetResponseParameters() - for _, c := range cs { - if us := c.GetIntervalUs(); us > 0 { - time.Sleep(time.Duration(us) * time.Microsecond) - } - if err := stream.Send(&testpb.StreamingOutputCallResponse{ - Payload: newPayload(args.GetResponseType(), int(c.GetSize())), - }); err != nil { - return err - } - } - return nil -} - -func (s *testServer) StreamingInputCall(stream 
testpb.TestService_StreamingInputCallServer) error { - var sum int - for { - in, err := stream.Recv() - if err == io.EOF { - return stream.SendAndClose(&testpb.StreamingInputCallResponse{ - AggregatedPayloadSize: proto.Int32(int32(sum)), - }) - } - if err != nil { - return err - } - p := in.GetPayload().GetBody() - sum += len(p) - } -} - -func (s *testServer) FullDuplexCall(stream testpb.TestService_FullDuplexCallServer) error { +func (s *testServer) StreamingCall(stream testpb.TestService_StreamingCallServer) error { for { in, err := stream.Recv() if err == io.EOF { @@ -122,53 +84,19 @@ func (s *testServer) FullDuplexCall(stream testpb.TestService_FullDuplexCallServ if err != nil { return err } - cs := in.GetResponseParameters() - for _, c := range cs { - if us := c.GetIntervalUs(); us > 0 { - time.Sleep(time.Duration(us) * time.Microsecond) - } - if err := stream.Send(&testpb.StreamingOutputCallResponse{ - Payload: newPayload(in.GetResponseType(), int(c.GetSize())), - }); err != nil { - return err - } - } - } -} - -func (s *testServer) HalfDuplexCall(stream testpb.TestService_HalfDuplexCallServer) error { - var msgBuf []*testpb.StreamingOutputCallRequest - for { - in, err := stream.Recv() - if err == io.EOF { - // read done. - break - } - if err != nil { + if err := stream.Send(&testpb.SimpleResponse{ + Payload: newPayload(in.ResponseType, int(in.ResponseSize)), + }); err != nil { return err } - msgBuf = append(msgBuf, in) } - for _, m := range msgBuf { - cs := m.GetResponseParameters() - for _, c := range cs { - if us := c.GetIntervalUs(); us > 0 { - time.Sleep(time.Duration(us) * time.Microsecond) - } - if err := stream.Send(&testpb.StreamingOutputCallResponse{ - Payload: newPayload(m.GetResponseType(), int(c.GetSize())), - }); err != nil { - return err - } - } - } - return nil } -// StartServer starts a gRPC server serving a benchmark service. It returns its -// listen address and a function to stop the server. 
-func StartServer() (string, func()) { - lis, err := net.Listen("tcp", ":0") +// StartServer starts a gRPC server serving a benchmark service on the given +// address, which may be something like "localhost:0". It returns its listen +// address and a function to stop the server. +func StartServer(addr string) (string, func()) { + lis, err := net.Listen("tcp", addr) if err != nil { grpclog.Fatalf("Failed to listen: %v", err) } @@ -184,8 +112,8 @@ func StartServer() (string, func()) { func DoUnaryCall(tc testpb.TestServiceClient, reqSize, respSize int) { pl := newPayload(testpb.PayloadType_COMPRESSABLE, reqSize) req := &testpb.SimpleRequest{ - ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), - ResponseSize: proto.Int32(int32(respSize)), + ResponseType: pl.Type, + ResponseSize: int32(respSize), Payload: pl, } if _, err := tc.UnaryCall(context.Background(), req); err != nil { @@ -193,9 +121,25 @@ func DoUnaryCall(tc testpb.TestServiceClient, reqSize, respSize int) { } } +// DoStreamingRoundTrip performs a round trip for a single streaming rpc. +func DoStreamingRoundTrip(tc testpb.TestServiceClient, stream testpb.TestService_StreamingCallClient, reqSize, respSize int) { + pl := newPayload(testpb.PayloadType_COMPRESSABLE, reqSize) + req := &testpb.SimpleRequest{ + ResponseType: pl.Type, + ResponseSize: int32(respSize), + Payload: pl, + } + if err := stream.Send(req); err != nil { + grpclog.Fatalf("StreamingCall(_).Send: %v", err) + } + if _, err := stream.Recv(); err != nil { + grpclog.Fatalf("StreamingCall(_).Recv: %v", err) + } +} + // NewClientConn creates a gRPC client connection to addr. 
func NewClientConn(addr string) *grpc.ClientConn { - conn, err := grpc.Dial(addr) + conn, err := grpc.Dial(addr, grpc.WithInsecure()) if err != nil { grpclog.Fatalf("NewClientConn(%q) failed to create a ClientConn %v", addr, err) } diff --git a/Godeps/_workspace/src/google.golang.org/grpc/benchmark/client/main.go b/Godeps/_workspace/src/google.golang.org/grpc/benchmark/client/main.go index 595d80ba5bb..f9e2a83dbde 100644 --- a/Godeps/_workspace/src/google.golang.org/grpc/benchmark/client/main.go +++ b/Godeps/_workspace/src/google.golang.org/grpc/benchmark/client/main.go @@ -9,29 +9,95 @@ import ( "sync" "time" + "golang.org/x/net/context" + "google.golang.org/grpc" "google.golang.org/grpc/benchmark" + testpb "google.golang.org/grpc/benchmark/grpc_testing" "google.golang.org/grpc/benchmark/stats" "google.golang.org/grpc/grpclog" - testpb "google.golang.org/grpc/interop/grpc_testing" ) var ( server = flag.String("server", "", "The server address") maxConcurrentRPCs = flag.Int("max_concurrent_rpcs", 1, "The max number of concurrent RPCs") duration = flag.Int("duration", math.MaxInt32, "The duration in seconds to run the benchmark client") + trace = flag.Bool("trace", true, "Whether tracing is on") + rpcType = flag.Int("rpc_type", 0, + `Configure different client rpc type. Valid options are: + 0 : unary call; + 1 : streaming call.`) ) -func caller(client testpb.TestServiceClient) { +func unaryCaller(client testpb.TestServiceClient) { benchmark.DoUnaryCall(client, 1, 1) } -func closeLoop() { - s := stats.NewStats(256) - conn := benchmark.NewClientConn(*server) - tc := testpb.NewTestServiceClient(conn) - // Warm up connection. 
+func streamCaller(client testpb.TestServiceClient, stream testpb.TestService_StreamingCallClient) { + benchmark.DoStreamingRoundTrip(client, stream, 1, 1) +} + +func buildConnection() (s *stats.Stats, conn *grpc.ClientConn, tc testpb.TestServiceClient) { + s = stats.NewStats(256) + conn = benchmark.NewClientConn(*server) + tc = testpb.NewTestServiceClient(conn) + return s, conn, tc +} + +func closeLoopUnary() { + s, conn, tc := buildConnection() + for i := 0; i < 100; i++ { - caller(tc) + unaryCaller(tc) + } + ch := make(chan int, *maxConcurrentRPCs*4) + var ( + mu sync.Mutex + wg sync.WaitGroup + ) + wg.Add(*maxConcurrentRPCs) + + for i := 0; i < *maxConcurrentRPCs; i++ { + go func() { + for _ = range ch { + start := time.Now() + unaryCaller(tc) + elapse := time.Since(start) + mu.Lock() + s.Add(elapse) + mu.Unlock() + } + wg.Done() + }() + } + // Stop the client when time is up. + done := make(chan struct{}) + go func() { + <-time.After(time.Duration(*duration) * time.Second) + close(done) + }() + ok := true + for ok { + select { + case ch <- 0: + case <-done: + ok = false + } + } + close(ch) + wg.Wait() + conn.Close() + grpclog.Println(s.String()) + +} + +func closeLoopStream() { + s, conn, tc := buildConnection() + stream, err := tc.StreamingCall(context.Background()) + if err != nil { + grpclog.Fatalf("%v.StreamingCall(_) = _, %v", tc, err) + } + for i := 0; i < 100; i++ { + streamCaller(tc, stream) } ch := make(chan int, *maxConcurrentRPCs*4) var ( @@ -44,7 +110,7 @@ func closeLoop() { go func() { for _ = range ch { start := time.Now() - caller(tc) + streamCaller(tc, stream) elapse := time.Since(start) mu.Lock() s.Add(elapse) @@ -75,6 +141,7 @@ func closeLoop() { func main() { flag.Parse() + grpc.EnableTracing = *trace go func() { lis, err := net.Listen("tcp", ":0") if err != nil { @@ -85,5 +152,10 @@ func main() { grpclog.Fatalf("Failed to serve: %v", err) } }() - closeLoop() + switch *rpcType { + case 0: + closeLoopUnary() + case 1: + closeLoopStream() + } 
} diff --git a/Godeps/_workspace/src/google.golang.org/grpc/benchmark/grpc_testing/test.pb.go b/Godeps/_workspace/src/google.golang.org/grpc/benchmark/grpc_testing/test.pb.go index 261ba1cc277..619c450cad5 100644 --- a/Godeps/_workspace/src/google.golang.org/grpc/benchmark/grpc_testing/test.pb.go +++ b/Godeps/_workspace/src/google.golang.org/grpc/benchmark/grpc_testing/test.pb.go @@ -1,28 +1,32 @@ // Code generated by protoc-gen-go. -// source: src/google.golang.org/grpc/test/grpc_testing/test.proto +// source: test.proto // DO NOT EDIT! /* Package grpc_testing is a generated protocol buffer package. It is generated from these files: - src/google.golang.org/grpc/test/grpc_testing/test.proto + test.proto It has these top-level messages: - Empty + StatsRequest + ServerStats Payload + HistogramData + ClientConfig + Mark + ClientArgs + ClientStats + ClientStatus + ServerConfig + ServerArgs + ServerStatus SimpleRequest SimpleResponse - StreamingInputCallRequest - StreamingInputCallResponse - ResponseParameters - StreamingOutputCallRequest - StreamingOutputCallResponse */ package grpc_testing import proto "github.com/golang/protobuf/proto" -import math "math" import ( context "golang.org/x/net/context" @@ -35,9 +39,7 @@ var _ grpc.ClientConn // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal -var _ = math.Inf -// The type of payload that should be returned. 
type PayloadType int32 const ( @@ -60,93 +62,260 @@ var PayloadType_value = map[string]int32{ "RANDOM": 2, } -func (x PayloadType) Enum() *PayloadType { - p := new(PayloadType) - *p = x - return p -} func (x PayloadType) String() string { return proto.EnumName(PayloadType_name, int32(x)) } -func (x *PayloadType) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(PayloadType_value, data, "PayloadType") - if err != nil { - return err - } - *x = PayloadType(value) - return nil + +type ClientType int32 + +const ( + ClientType_SYNCHRONOUS_CLIENT ClientType = 0 + ClientType_ASYNC_CLIENT ClientType = 1 +) + +var ClientType_name = map[int32]string{ + 0: "SYNCHRONOUS_CLIENT", + 1: "ASYNC_CLIENT", +} +var ClientType_value = map[string]int32{ + "SYNCHRONOUS_CLIENT": 0, + "ASYNC_CLIENT": 1, } -type Empty struct { - XXX_unrecognized []byte `json:"-"` +func (x ClientType) String() string { + return proto.EnumName(ClientType_name, int32(x)) } -func (m *Empty) Reset() { *m = Empty{} } -func (m *Empty) String() string { return proto.CompactTextString(m) } -func (*Empty) ProtoMessage() {} +type ServerType int32 + +const ( + ServerType_SYNCHRONOUS_SERVER ServerType = 0 + ServerType_ASYNC_SERVER ServerType = 1 +) + +var ServerType_name = map[int32]string{ + 0: "SYNCHRONOUS_SERVER", + 1: "ASYNC_SERVER", +} +var ServerType_value = map[string]int32{ + "SYNCHRONOUS_SERVER": 0, + "ASYNC_SERVER": 1, +} + +func (x ServerType) String() string { + return proto.EnumName(ServerType_name, int32(x)) +} + +type RpcType int32 + +const ( + RpcType_UNARY RpcType = 0 + RpcType_STREAMING RpcType = 1 +) + +var RpcType_name = map[int32]string{ + 0: "UNARY", + 1: "STREAMING", +} +var RpcType_value = map[string]int32{ + "UNARY": 0, + "STREAMING": 1, +} + +func (x RpcType) String() string { + return proto.EnumName(RpcType_name, int32(x)) +} + +type StatsRequest struct { + // run number + TestNum int32 `protobuf:"varint,1,opt,name=test_num" json:"test_num,omitempty"` +} + +func (m 
*StatsRequest) Reset() { *m = StatsRequest{} } +func (m *StatsRequest) String() string { return proto.CompactTextString(m) } +func (*StatsRequest) ProtoMessage() {} + +type ServerStats struct { + // wall clock time + TimeElapsed float64 `protobuf:"fixed64,1,opt,name=time_elapsed" json:"time_elapsed,omitempty"` + // user time used by the server process and threads + TimeUser float64 `protobuf:"fixed64,2,opt,name=time_user" json:"time_user,omitempty"` + // server time used by the server process and all threads + TimeSystem float64 `protobuf:"fixed64,3,opt,name=time_system" json:"time_system,omitempty"` +} + +func (m *ServerStats) Reset() { *m = ServerStats{} } +func (m *ServerStats) String() string { return proto.CompactTextString(m) } +func (*ServerStats) ProtoMessage() {} -// A block of data, to simply increase gRPC message size. type Payload struct { // The type of data in body. - Type *PayloadType `protobuf:"varint,1,opt,name=type,enum=grpc.testing.PayloadType" json:"type,omitempty"` + Type PayloadType `protobuf:"varint,1,opt,name=type,enum=grpc.testing.PayloadType" json:"type,omitempty"` // Primary contents of payload. 
- Body []byte `protobuf:"bytes,2,opt,name=body" json:"body,omitempty"` - XXX_unrecognized []byte `json:"-"` + Body []byte `protobuf:"bytes,2,opt,name=body,proto3" json:"body,omitempty"` } func (m *Payload) Reset() { *m = Payload{} } func (m *Payload) String() string { return proto.CompactTextString(m) } func (*Payload) ProtoMessage() {} -func (m *Payload) GetType() PayloadType { - if m != nil && m.Type != nil { - return *m.Type - } - return PayloadType_COMPRESSABLE +type HistogramData struct { + Bucket []uint32 `protobuf:"varint,1,rep,name=bucket" json:"bucket,omitempty"` + MinSeen float64 `protobuf:"fixed64,2,opt,name=min_seen" json:"min_seen,omitempty"` + MaxSeen float64 `protobuf:"fixed64,3,opt,name=max_seen" json:"max_seen,omitempty"` + Sum float64 `protobuf:"fixed64,4,opt,name=sum" json:"sum,omitempty"` + SumOfSquares float64 `protobuf:"fixed64,5,opt,name=sum_of_squares" json:"sum_of_squares,omitempty"` + Count float64 `protobuf:"fixed64,6,opt,name=count" json:"count,omitempty"` } -func (m *Payload) GetBody() []byte { +func (m *HistogramData) Reset() { *m = HistogramData{} } +func (m *HistogramData) String() string { return proto.CompactTextString(m) } +func (*HistogramData) ProtoMessage() {} + +type ClientConfig struct { + ServerTargets []string `protobuf:"bytes,1,rep,name=server_targets" json:"server_targets,omitempty"` + ClientType ClientType `protobuf:"varint,2,opt,name=client_type,enum=grpc.testing.ClientType" json:"client_type,omitempty"` + EnableSsl bool `protobuf:"varint,3,opt,name=enable_ssl" json:"enable_ssl,omitempty"` + OutstandingRpcsPerChannel int32 `protobuf:"varint,4,opt,name=outstanding_rpcs_per_channel" json:"outstanding_rpcs_per_channel,omitempty"` + ClientChannels int32 `protobuf:"varint,5,opt,name=client_channels" json:"client_channels,omitempty"` + PayloadSize int32 `protobuf:"varint,6,opt,name=payload_size" json:"payload_size,omitempty"` + // only for async client: + AsyncClientThreads int32 
`protobuf:"varint,7,opt,name=async_client_threads" json:"async_client_threads,omitempty"` + RpcType RpcType `protobuf:"varint,8,opt,name=rpc_type,enum=grpc.testing.RpcType" json:"rpc_type,omitempty"` +} + +func (m *ClientConfig) Reset() { *m = ClientConfig{} } +func (m *ClientConfig) String() string { return proto.CompactTextString(m) } +func (*ClientConfig) ProtoMessage() {} + +// Request current stats +type Mark struct { +} + +func (m *Mark) Reset() { *m = Mark{} } +func (m *Mark) String() string { return proto.CompactTextString(m) } +func (*Mark) ProtoMessage() {} + +type ClientArgs struct { + Setup *ClientConfig `protobuf:"bytes,1,opt,name=setup" json:"setup,omitempty"` + Mark *Mark `protobuf:"bytes,2,opt,name=mark" json:"mark,omitempty"` +} + +func (m *ClientArgs) Reset() { *m = ClientArgs{} } +func (m *ClientArgs) String() string { return proto.CompactTextString(m) } +func (*ClientArgs) ProtoMessage() {} + +func (m *ClientArgs) GetSetup() *ClientConfig { if m != nil { - return m.Body + return m.Setup + } + return nil +} + +func (m *ClientArgs) GetMark() *Mark { + if m != nil { + return m.Mark + } + return nil +} + +type ClientStats struct { + Latencies *HistogramData `protobuf:"bytes,1,opt,name=latencies" json:"latencies,omitempty"` + TimeElapsed float64 `protobuf:"fixed64,3,opt,name=time_elapsed" json:"time_elapsed,omitempty"` + TimeUser float64 `protobuf:"fixed64,4,opt,name=time_user" json:"time_user,omitempty"` + TimeSystem float64 `protobuf:"fixed64,5,opt,name=time_system" json:"time_system,omitempty"` +} + +func (m *ClientStats) Reset() { *m = ClientStats{} } +func (m *ClientStats) String() string { return proto.CompactTextString(m) } +func (*ClientStats) ProtoMessage() {} + +func (m *ClientStats) GetLatencies() *HistogramData { + if m != nil { + return m.Latencies + } + return nil +} + +type ClientStatus struct { + Stats *ClientStats `protobuf:"bytes,1,opt,name=stats" json:"stats,omitempty"` +} + +func (m *ClientStatus) Reset() { *m = ClientStatus{} } 
+func (m *ClientStatus) String() string { return proto.CompactTextString(m) } +func (*ClientStatus) ProtoMessage() {} + +func (m *ClientStatus) GetStats() *ClientStats { + if m != nil { + return m.Stats + } + return nil +} + +type ServerConfig struct { + ServerType ServerType `protobuf:"varint,1,opt,name=server_type,enum=grpc.testing.ServerType" json:"server_type,omitempty"` + Threads int32 `protobuf:"varint,2,opt,name=threads" json:"threads,omitempty"` + EnableSsl bool `protobuf:"varint,3,opt,name=enable_ssl" json:"enable_ssl,omitempty"` +} + +func (m *ServerConfig) Reset() { *m = ServerConfig{} } +func (m *ServerConfig) String() string { return proto.CompactTextString(m) } +func (*ServerConfig) ProtoMessage() {} + +type ServerArgs struct { + Setup *ServerConfig `protobuf:"bytes,1,opt,name=setup" json:"setup,omitempty"` + Mark *Mark `protobuf:"bytes,2,opt,name=mark" json:"mark,omitempty"` +} + +func (m *ServerArgs) Reset() { *m = ServerArgs{} } +func (m *ServerArgs) String() string { return proto.CompactTextString(m) } +func (*ServerArgs) ProtoMessage() {} + +func (m *ServerArgs) GetSetup() *ServerConfig { + if m != nil { + return m.Setup + } + return nil +} + +func (m *ServerArgs) GetMark() *Mark { + if m != nil { + return m.Mark + } + return nil +} + +type ServerStatus struct { + Stats *ServerStats `protobuf:"bytes,1,opt,name=stats" json:"stats,omitempty"` + Port int32 `protobuf:"varint,2,opt,name=port" json:"port,omitempty"` +} + +func (m *ServerStatus) Reset() { *m = ServerStatus{} } +func (m *ServerStatus) String() string { return proto.CompactTextString(m) } +func (*ServerStatus) ProtoMessage() {} + +func (m *ServerStatus) GetStats() *ServerStats { + if m != nil { + return m.Stats } return nil } -// Unary request. type SimpleRequest struct { // Desired payload type in the response from the server. // If response_type is RANDOM, server randomly chooses one from other formats. 
- ResponseType *PayloadType `protobuf:"varint,1,opt,name=response_type,enum=grpc.testing.PayloadType" json:"response_type,omitempty"` + ResponseType PayloadType `protobuf:"varint,1,opt,name=response_type,enum=grpc.testing.PayloadType" json:"response_type,omitempty"` // Desired payload size in the response from the server. // If response_type is COMPRESSABLE, this denotes the size before compression. - ResponseSize *int32 `protobuf:"varint,2,opt,name=response_size" json:"response_size,omitempty"` + ResponseSize int32 `protobuf:"varint,2,opt,name=response_size" json:"response_size,omitempty"` // Optional input payload sent along with the request. Payload *Payload `protobuf:"bytes,3,opt,name=payload" json:"payload,omitempty"` - // Whether SimpleResponse should include username. - FillUsername *bool `protobuf:"varint,4,opt,name=fill_username" json:"fill_username,omitempty"` - // Whether SimpleResponse should include OAuth scope. - FillOauthScope *bool `protobuf:"varint,5,opt,name=fill_oauth_scope" json:"fill_oauth_scope,omitempty"` - XXX_unrecognized []byte `json:"-"` } func (m *SimpleRequest) Reset() { *m = SimpleRequest{} } func (m *SimpleRequest) String() string { return proto.CompactTextString(m) } func (*SimpleRequest) ProtoMessage() {} -func (m *SimpleRequest) GetResponseType() PayloadType { - if m != nil && m.ResponseType != nil { - return *m.ResponseType - } - return PayloadType_COMPRESSABLE -} - -func (m *SimpleRequest) GetResponseSize() int32 { - if m != nil && m.ResponseSize != nil { - return *m.ResponseSize - } - return 0 -} - func (m *SimpleRequest) GetPayload() *Payload { if m != nil { return m.Payload @@ -154,30 +323,8 @@ func (m *SimpleRequest) GetPayload() *Payload { return nil } -func (m *SimpleRequest) GetFillUsername() bool { - if m != nil && m.FillUsername != nil { - return *m.FillUsername - } - return false -} - -func (m *SimpleRequest) GetFillOauthScope() bool { - if m != nil && m.FillOauthScope != nil { - return *m.FillOauthScope - } - return 
false -} - -// Unary response, as configured by the request. type SimpleResponse struct { - // Payload to increase message size. Payload *Payload `protobuf:"bytes,1,opt,name=payload" json:"payload,omitempty"` - // The user the request came from, for verifying authentication was - // successful when the client expected it. - Username *string `protobuf:"bytes,2,opt,name=username" json:"username,omitempty"` - // OAuth scope. - OauthScope *string `protobuf:"bytes,3,opt,name=oauth_scope" json:"oauth_scope,omitempty"` - XXX_unrecognized []byte `json:"-"` } func (m *SimpleResponse) Reset() { *m = SimpleResponse{} } @@ -191,169 +338,22 @@ func (m *SimpleResponse) GetPayload() *Payload { return nil } -func (m *SimpleResponse) GetUsername() string { - if m != nil && m.Username != nil { - return *m.Username - } - return "" -} - -func (m *SimpleResponse) GetOauthScope() string { - if m != nil && m.OauthScope != nil { - return *m.OauthScope - } - return "" -} - -// Client-streaming request. -type StreamingInputCallRequest struct { - // Optional input payload sent along with the request. - Payload *Payload `protobuf:"bytes,1,opt,name=payload" json:"payload,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *StreamingInputCallRequest) Reset() { *m = StreamingInputCallRequest{} } -func (m *StreamingInputCallRequest) String() string { return proto.CompactTextString(m) } -func (*StreamingInputCallRequest) ProtoMessage() {} - -func (m *StreamingInputCallRequest) GetPayload() *Payload { - if m != nil { - return m.Payload - } - return nil -} - -// Client-streaming response. -type StreamingInputCallResponse struct { - // Aggregated size of payloads received from the client. 
- AggregatedPayloadSize *int32 `protobuf:"varint,1,opt,name=aggregated_payload_size" json:"aggregated_payload_size,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *StreamingInputCallResponse) Reset() { *m = StreamingInputCallResponse{} } -func (m *StreamingInputCallResponse) String() string { return proto.CompactTextString(m) } -func (*StreamingInputCallResponse) ProtoMessage() {} - -func (m *StreamingInputCallResponse) GetAggregatedPayloadSize() int32 { - if m != nil && m.AggregatedPayloadSize != nil { - return *m.AggregatedPayloadSize - } - return 0 -} - -// Configuration for a particular response. -type ResponseParameters struct { - // Desired payload sizes in responses from the server. - // If response_type is COMPRESSABLE, this denotes the size before compression. - Size *int32 `protobuf:"varint,1,opt,name=size" json:"size,omitempty"` - // Desired interval between consecutive responses in the response stream in - // microseconds. - IntervalUs *int32 `protobuf:"varint,2,opt,name=interval_us" json:"interval_us,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *ResponseParameters) Reset() { *m = ResponseParameters{} } -func (m *ResponseParameters) String() string { return proto.CompactTextString(m) } -func (*ResponseParameters) ProtoMessage() {} - -func (m *ResponseParameters) GetSize() int32 { - if m != nil && m.Size != nil { - return *m.Size - } - return 0 -} - -func (m *ResponseParameters) GetIntervalUs() int32 { - if m != nil && m.IntervalUs != nil { - return *m.IntervalUs - } - return 0 -} - -// Server-streaming request. -type StreamingOutputCallRequest struct { - // Desired payload type in the response from the server. - // If response_type is RANDOM, the payload from each response in the stream - // might be of different types. This is to simulate a mixed type of payload - // stream. 
- ResponseType *PayloadType `protobuf:"varint,1,opt,name=response_type,enum=grpc.testing.PayloadType" json:"response_type,omitempty"` - // Configuration for each expected response message. - ResponseParameters []*ResponseParameters `protobuf:"bytes,2,rep,name=response_parameters" json:"response_parameters,omitempty"` - // Optional input payload sent along with the request. - Payload *Payload `protobuf:"bytes,3,opt,name=payload" json:"payload,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *StreamingOutputCallRequest) Reset() { *m = StreamingOutputCallRequest{} } -func (m *StreamingOutputCallRequest) String() string { return proto.CompactTextString(m) } -func (*StreamingOutputCallRequest) ProtoMessage() {} - -func (m *StreamingOutputCallRequest) GetResponseType() PayloadType { - if m != nil && m.ResponseType != nil { - return *m.ResponseType - } - return PayloadType_COMPRESSABLE -} - -func (m *StreamingOutputCallRequest) GetResponseParameters() []*ResponseParameters { - if m != nil { - return m.ResponseParameters - } - return nil -} - -func (m *StreamingOutputCallRequest) GetPayload() *Payload { - if m != nil { - return m.Payload - } - return nil -} - -// Server-streaming response, as configured by the request and parameters. -type StreamingOutputCallResponse struct { - // Payload to increase response size. 
- Payload *Payload `protobuf:"bytes,1,opt,name=payload" json:"payload,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *StreamingOutputCallResponse) Reset() { *m = StreamingOutputCallResponse{} } -func (m *StreamingOutputCallResponse) String() string { return proto.CompactTextString(m) } -func (*StreamingOutputCallResponse) ProtoMessage() {} - -func (m *StreamingOutputCallResponse) GetPayload() *Payload { - if m != nil { - return m.Payload - } - return nil -} - func init() { proto.RegisterEnum("grpc.testing.PayloadType", PayloadType_name, PayloadType_value) + proto.RegisterEnum("grpc.testing.ClientType", ClientType_name, ClientType_value) + proto.RegisterEnum("grpc.testing.ServerType", ServerType_name, ServerType_value) + proto.RegisterEnum("grpc.testing.RpcType", RpcType_name, RpcType_value) } // Client API for TestService service type TestServiceClient interface { - // One empty request followed by one empty response. - EmptyCall(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) // One request followed by one response. // The server returns the client payload as-is. UnaryCall(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (*SimpleResponse, error) - // One request followed by a sequence of responses (streamed download). - // The server returns the payload with client desired type and sizes. - StreamingOutputCall(ctx context.Context, in *StreamingOutputCallRequest, opts ...grpc.CallOption) (TestService_StreamingOutputCallClient, error) - // A sequence of requests followed by one response (streamed upload). - // The server returns the aggregated size of client payload as the result. - StreamingInputCall(ctx context.Context, opts ...grpc.CallOption) (TestService_StreamingInputCallClient, error) - // A sequence of requests with each request served by the server immediately. - // As one request could lead to multiple responses, this interface - // demonstrates the idea of full duplexing. 
- FullDuplexCall(ctx context.Context, opts ...grpc.CallOption) (TestService_FullDuplexCallClient, error) - // A sequence of requests followed by a sequence of responses. - // The server buffers all the client requests and then serves them in order. A - // stream of responses are returned to the client when the server starts with - // first request. - HalfDuplexCall(ctx context.Context, opts ...grpc.CallOption) (TestService_HalfDuplexCallClient, error) + // One request followed by one response. + // The server returns the client payload as-is. + StreamingCall(ctx context.Context, opts ...grpc.CallOption) (TestService_StreamingCallClient, error) } type testServiceClient struct { @@ -364,15 +364,6 @@ func NewTestServiceClient(cc *grpc.ClientConn) TestServiceClient { return &testServiceClient{cc} } -func (c *testServiceClient) EmptyCall(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) { - out := new(Empty) - err := grpc.Invoke(ctx, "/grpc.testing.TestService/EmptyCall", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - func (c *testServiceClient) UnaryCall(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (*SimpleResponse, error) { out := new(SimpleResponse) err := grpc.Invoke(ctx, "/grpc.testing.TestService/UnaryCall", in, out, c.cc, opts...) @@ -382,128 +373,31 @@ func (c *testServiceClient) UnaryCall(ctx context.Context, in *SimpleRequest, op return out, nil } -func (c *testServiceClient) StreamingOutputCall(ctx context.Context, in *StreamingOutputCallRequest, opts ...grpc.CallOption) (TestService_StreamingOutputCallClient, error) { - stream, err := grpc.NewClientStream(ctx, &_TestService_serviceDesc.Streams[0], c.cc, "/grpc.testing.TestService/StreamingOutputCall", opts...) 
+func (c *testServiceClient) StreamingCall(ctx context.Context, opts ...grpc.CallOption) (TestService_StreamingCallClient, error) { + stream, err := grpc.NewClientStream(ctx, &_TestService_serviceDesc.Streams[0], c.cc, "/grpc.testing.TestService/StreamingCall", opts...) if err != nil { return nil, err } - x := &testServiceStreamingOutputCallClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } + x := &testServiceStreamingCallClient{stream} return x, nil } -type TestService_StreamingOutputCallClient interface { - Recv() (*StreamingOutputCallResponse, error) +type TestService_StreamingCallClient interface { + Send(*SimpleRequest) error + Recv() (*SimpleResponse, error) grpc.ClientStream } -type testServiceStreamingOutputCallClient struct { +type testServiceStreamingCallClient struct { grpc.ClientStream } -func (x *testServiceStreamingOutputCallClient) Recv() (*StreamingOutputCallResponse, error) { - m := new(StreamingOutputCallResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *testServiceClient) StreamingInputCall(ctx context.Context, opts ...grpc.CallOption) (TestService_StreamingInputCallClient, error) { - stream, err := grpc.NewClientStream(ctx, &_TestService_serviceDesc.Streams[1], c.cc, "/grpc.testing.TestService/StreamingInputCall", opts...) 
- if err != nil { - return nil, err - } - x := &testServiceStreamingInputCallClient{stream} - return x, nil -} - -type TestService_StreamingInputCallClient interface { - Send(*StreamingInputCallRequest) error - CloseAndRecv() (*StreamingInputCallResponse, error) - grpc.ClientStream -} - -type testServiceStreamingInputCallClient struct { - grpc.ClientStream -} - -func (x *testServiceStreamingInputCallClient) Send(m *StreamingInputCallRequest) error { +func (x *testServiceStreamingCallClient) Send(m *SimpleRequest) error { return x.ClientStream.SendMsg(m) } -func (x *testServiceStreamingInputCallClient) CloseAndRecv() (*StreamingInputCallResponse, error) { - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - m := new(StreamingInputCallResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *testServiceClient) FullDuplexCall(ctx context.Context, opts ...grpc.CallOption) (TestService_FullDuplexCallClient, error) { - stream, err := grpc.NewClientStream(ctx, &_TestService_serviceDesc.Streams[2], c.cc, "/grpc.testing.TestService/FullDuplexCall", opts...) 
- if err != nil { - return nil, err - } - x := &testServiceFullDuplexCallClient{stream} - return x, nil -} - -type TestService_FullDuplexCallClient interface { - Send(*StreamingOutputCallRequest) error - Recv() (*StreamingOutputCallResponse, error) - grpc.ClientStream -} - -type testServiceFullDuplexCallClient struct { - grpc.ClientStream -} - -func (x *testServiceFullDuplexCallClient) Send(m *StreamingOutputCallRequest) error { - return x.ClientStream.SendMsg(m) -} - -func (x *testServiceFullDuplexCallClient) Recv() (*StreamingOutputCallResponse, error) { - m := new(StreamingOutputCallResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *testServiceClient) HalfDuplexCall(ctx context.Context, opts ...grpc.CallOption) (TestService_HalfDuplexCallClient, error) { - stream, err := grpc.NewClientStream(ctx, &_TestService_serviceDesc.Streams[3], c.cc, "/grpc.testing.TestService/HalfDuplexCall", opts...) - if err != nil { - return nil, err - } - x := &testServiceHalfDuplexCallClient{stream} - return x, nil -} - -type TestService_HalfDuplexCallClient interface { - Send(*StreamingOutputCallRequest) error - Recv() (*StreamingOutputCallResponse, error) - grpc.ClientStream -} - -type testServiceHalfDuplexCallClient struct { - grpc.ClientStream -} - -func (x *testServiceHalfDuplexCallClient) Send(m *StreamingOutputCallRequest) error { - return x.ClientStream.SendMsg(m) -} - -func (x *testServiceHalfDuplexCallClient) Recv() (*StreamingOutputCallResponse, error) { - m := new(StreamingOutputCallResponse) +func (x *testServiceStreamingCallClient) Recv() (*SimpleResponse, error) { + m := new(SimpleResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } @@ -513,44 +407,18 @@ func (x *testServiceHalfDuplexCallClient) Recv() (*StreamingOutputCallResponse, // Server API for TestService service type TestServiceServer interface { - // One empty request followed by one empty response. 
- EmptyCall(context.Context, *Empty) (*Empty, error) // One request followed by one response. // The server returns the client payload as-is. UnaryCall(context.Context, *SimpleRequest) (*SimpleResponse, error) - // One request followed by a sequence of responses (streamed download). - // The server returns the payload with client desired type and sizes. - StreamingOutputCall(*StreamingOutputCallRequest, TestService_StreamingOutputCallServer) error - // A sequence of requests followed by one response (streamed upload). - // The server returns the aggregated size of client payload as the result. - StreamingInputCall(TestService_StreamingInputCallServer) error - // A sequence of requests with each request served by the server immediately. - // As one request could lead to multiple responses, this interface - // demonstrates the idea of full duplexing. - FullDuplexCall(TestService_FullDuplexCallServer) error - // A sequence of requests followed by a sequence of responses. - // The server buffers all the client requests and then serves them in order. A - // stream of responses are returned to the client when the server starts with - // first request. - HalfDuplexCall(TestService_HalfDuplexCallServer) error + // One request followed by one response. + // The server returns the client payload as-is. 
+ StreamingCall(TestService_StreamingCallServer) error } func RegisterTestServiceServer(s *grpc.Server, srv TestServiceServer) { s.RegisterService(&_TestService_serviceDesc, srv) } -func _TestService_EmptyCall_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) { - in := new(Empty) - if err := codec.Unmarshal(buf, in); err != nil { - return nil, err - } - out, err := srv.(TestServiceServer).EmptyCall(ctx, in) - if err != nil { - return nil, err - } - return out, nil -} - func _TestService_UnaryCall_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) { in := new(SimpleRequest) if err := codec.Unmarshal(buf, in); err != nil { @@ -563,99 +431,26 @@ func _TestService_UnaryCall_Handler(srv interface{}, ctx context.Context, codec return out, nil } -func _TestService_StreamingOutputCall_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(StreamingOutputCallRequest) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(TestServiceServer).StreamingOutputCall(m, &testServiceStreamingOutputCallServer{stream}) +func _TestService_StreamingCall_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(TestServiceServer).StreamingCall(&testServiceStreamingCallServer{stream}) } -type TestService_StreamingOutputCallServer interface { - Send(*StreamingOutputCallResponse) error +type TestService_StreamingCallServer interface { + Send(*SimpleResponse) error + Recv() (*SimpleRequest, error) grpc.ServerStream } -type testServiceStreamingOutputCallServer struct { +type testServiceStreamingCallServer struct { grpc.ServerStream } -func (x *testServiceStreamingOutputCallServer) Send(m *StreamingOutputCallResponse) error { +func (x *testServiceStreamingCallServer) Send(m *SimpleResponse) error { return x.ServerStream.SendMsg(m) } -func _TestService_StreamingInputCall_Handler(srv interface{}, stream grpc.ServerStream) error { - return 
srv.(TestServiceServer).StreamingInputCall(&testServiceStreamingInputCallServer{stream}) -} - -type TestService_StreamingInputCallServer interface { - SendAndClose(*StreamingInputCallResponse) error - Recv() (*StreamingInputCallRequest, error) - grpc.ServerStream -} - -type testServiceStreamingInputCallServer struct { - grpc.ServerStream -} - -func (x *testServiceStreamingInputCallServer) SendAndClose(m *StreamingInputCallResponse) error { - return x.ServerStream.SendMsg(m) -} - -func (x *testServiceStreamingInputCallServer) Recv() (*StreamingInputCallRequest, error) { - m := new(StreamingInputCallRequest) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func _TestService_FullDuplexCall_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(TestServiceServer).FullDuplexCall(&testServiceFullDuplexCallServer{stream}) -} - -type TestService_FullDuplexCallServer interface { - Send(*StreamingOutputCallResponse) error - Recv() (*StreamingOutputCallRequest, error) - grpc.ServerStream -} - -type testServiceFullDuplexCallServer struct { - grpc.ServerStream -} - -func (x *testServiceFullDuplexCallServer) Send(m *StreamingOutputCallResponse) error { - return x.ServerStream.SendMsg(m) -} - -func (x *testServiceFullDuplexCallServer) Recv() (*StreamingOutputCallRequest, error) { - m := new(StreamingOutputCallRequest) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func _TestService_HalfDuplexCall_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(TestServiceServer).HalfDuplexCall(&testServiceHalfDuplexCallServer{stream}) -} - -type TestService_HalfDuplexCallServer interface { - Send(*StreamingOutputCallResponse) error - Recv() (*StreamingOutputCallRequest, error) - grpc.ServerStream -} - -type testServiceHalfDuplexCallServer struct { - grpc.ServerStream -} - -func (x *testServiceHalfDuplexCallServer) Send(m *StreamingOutputCallResponse) error { - 
return x.ServerStream.SendMsg(m) -} - -func (x *testServiceHalfDuplexCallServer) Recv() (*StreamingOutputCallRequest, error) { - m := new(StreamingOutputCallRequest) +func (x *testServiceStreamingCallServer) Recv() (*SimpleRequest, error) { + m := new(SimpleRequest) if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err } @@ -666,10 +461,6 @@ var _TestService_serviceDesc = grpc.ServiceDesc{ ServiceName: "grpc.testing.TestService", HandlerType: (*TestServiceServer)(nil), Methods: []grpc.MethodDesc{ - { - MethodName: "EmptyCall", - Handler: _TestService_EmptyCall_Handler, - }, { MethodName: "UnaryCall", Handler: _TestService_UnaryCall_Handler, @@ -677,24 +468,172 @@ var _TestService_serviceDesc = grpc.ServiceDesc{ }, Streams: []grpc.StreamDesc{ { - StreamName: "StreamingOutputCall", - Handler: _TestService_StreamingOutputCall_Handler, - ServerStreams: true, - }, - { - StreamName: "StreamingInputCall", - Handler: _TestService_StreamingInputCall_Handler, - ClientStreams: true, - }, - { - StreamName: "FullDuplexCall", - Handler: _TestService_FullDuplexCall_Handler, - ServerStreams: true, - ClientStreams: true, - }, - { - StreamName: "HalfDuplexCall", - Handler: _TestService_HalfDuplexCall_Handler, + StreamName: "StreamingCall", + Handler: _TestService_StreamingCall_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, +} + +// Client API for Worker service + +type WorkerClient interface { + // Start test with specified workload + RunTest(ctx context.Context, opts ...grpc.CallOption) (Worker_RunTestClient, error) + // Start test with specified workload + RunServer(ctx context.Context, opts ...grpc.CallOption) (Worker_RunServerClient, error) +} + +type workerClient struct { + cc *grpc.ClientConn +} + +func NewWorkerClient(cc *grpc.ClientConn) WorkerClient { + return &workerClient{cc} +} + +func (c *workerClient) RunTest(ctx context.Context, opts ...grpc.CallOption) (Worker_RunTestClient, error) { + stream, err := grpc.NewClientStream(ctx, 
&_Worker_serviceDesc.Streams[0], c.cc, "/grpc.testing.Worker/RunTest", opts...) + if err != nil { + return nil, err + } + x := &workerRunTestClient{stream} + return x, nil +} + +type Worker_RunTestClient interface { + Send(*ClientArgs) error + Recv() (*ClientStatus, error) + grpc.ClientStream +} + +type workerRunTestClient struct { + grpc.ClientStream +} + +func (x *workerRunTestClient) Send(m *ClientArgs) error { + return x.ClientStream.SendMsg(m) +} + +func (x *workerRunTestClient) Recv() (*ClientStatus, error) { + m := new(ClientStatus) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *workerClient) RunServer(ctx context.Context, opts ...grpc.CallOption) (Worker_RunServerClient, error) { + stream, err := grpc.NewClientStream(ctx, &_Worker_serviceDesc.Streams[1], c.cc, "/grpc.testing.Worker/RunServer", opts...) + if err != nil { + return nil, err + } + x := &workerRunServerClient{stream} + return x, nil +} + +type Worker_RunServerClient interface { + Send(*ServerArgs) error + Recv() (*ServerStatus, error) + grpc.ClientStream +} + +type workerRunServerClient struct { + grpc.ClientStream +} + +func (x *workerRunServerClient) Send(m *ServerArgs) error { + return x.ClientStream.SendMsg(m) +} + +func (x *workerRunServerClient) Recv() (*ServerStatus, error) { + m := new(ServerStatus) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// Server API for Worker service + +type WorkerServer interface { + // Start test with specified workload + RunTest(Worker_RunTestServer) error + // Start test with specified workload + RunServer(Worker_RunServerServer) error +} + +func RegisterWorkerServer(s *grpc.Server, srv WorkerServer) { + s.RegisterService(&_Worker_serviceDesc, srv) +} + +func _Worker_RunTest_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(WorkerServer).RunTest(&workerRunTestServer{stream}) +} + +type Worker_RunTestServer interface { + 
Send(*ClientStatus) error + Recv() (*ClientArgs, error) + grpc.ServerStream +} + +type workerRunTestServer struct { + grpc.ServerStream +} + +func (x *workerRunTestServer) Send(m *ClientStatus) error { + return x.ServerStream.SendMsg(m) +} + +func (x *workerRunTestServer) Recv() (*ClientArgs, error) { + m := new(ClientArgs) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _Worker_RunServer_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(WorkerServer).RunServer(&workerRunServerServer{stream}) +} + +type Worker_RunServerServer interface { + Send(*ServerStatus) error + Recv() (*ServerArgs, error) + grpc.ServerStream +} + +type workerRunServerServer struct { + grpc.ServerStream +} + +func (x *workerRunServerServer) Send(m *ServerStatus) error { + return x.ServerStream.SendMsg(m) +} + +func (x *workerRunServerServer) Recv() (*ServerArgs, error) { + m := new(ServerArgs) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _Worker_serviceDesc = grpc.ServiceDesc{ + ServiceName: "grpc.testing.Worker", + HandlerType: (*WorkerServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "RunTest", + Handler: _Worker_RunTest_Handler, + ServerStreams: true, + ClientStreams: true, + }, + { + StreamName: "RunServer", + Handler: _Worker_RunServer_Handler, ServerStreams: true, ClientStreams: true, }, diff --git a/Godeps/_workspace/src/google.golang.org/grpc/benchmark/grpc_testing/test.proto b/Godeps/_workspace/src/google.golang.org/grpc/benchmark/grpc_testing/test.proto index b5bfe053789..e3a27f861b8 100644 --- a/Godeps/_workspace/src/google.golang.org/grpc/benchmark/grpc_testing/test.proto +++ b/Godeps/_workspace/src/google.golang.org/grpc/benchmark/grpc_testing/test.proto @@ -1,140 +1,148 @@ // An integration test service that covers all the method signature permutations // of unary/streaming requests/responses. 
-syntax = "proto2"; +syntax = "proto3"; package grpc.testing; -message Empty {} - -// The type of payload that should be returned. enum PayloadType { - // Compressable text format. - COMPRESSABLE = 0; + // Compressable text format. + COMPRESSABLE = 0; - // Uncompressable binary format. - UNCOMPRESSABLE = 1; + // Uncompressable binary format. + UNCOMPRESSABLE = 1; - // Randomly chosen from all other formats defined in this enum. - RANDOM = 2; + // Randomly chosen from all other formats defined in this enum. + RANDOM = 2; +} + +message StatsRequest { + // run number + optional int32 test_num = 1; +} + +message ServerStats { + // wall clock time + double time_elapsed = 1; + + // user time used by the server process and threads + double time_user = 2; + + // server time used by the server process and all threads + double time_system = 3; } -// A block of data, to simply increase gRPC message size. message Payload { - // The type of data in body. - optional PayloadType type = 1; - // Primary contents of payload. - optional bytes body = 2; + // The type of data in body. + PayloadType type = 1; + // Primary contents of payload. 
+ bytes body = 2; +} + +message HistogramData { + repeated uint32 bucket = 1; + double min_seen = 2; + double max_seen = 3; + double sum = 4; + double sum_of_squares = 5; + double count = 6; +} + +enum ClientType { + SYNCHRONOUS_CLIENT = 0; + ASYNC_CLIENT = 1; +} + +enum ServerType { + SYNCHRONOUS_SERVER = 0; + ASYNC_SERVER = 1; +} + +enum RpcType { + UNARY = 0; + STREAMING = 1; +} + +message ClientConfig { + repeated string server_targets = 1; + ClientType client_type = 2; + bool enable_ssl = 3; + int32 outstanding_rpcs_per_channel = 4; + int32 client_channels = 5; + int32 payload_size = 6; + // only for async client: + int32 async_client_threads = 7; + RpcType rpc_type = 8; +} + +// Request current stats +message Mark {} + +message ClientArgs { + oneof argtype { + ClientConfig setup = 1; + Mark mark = 2; + } +} + +message ClientStats { + HistogramData latencies = 1; + double time_elapsed = 3; + double time_user = 4; + double time_system = 5; +} + +message ClientStatus { + ClientStats stats = 1; +} + +message ServerConfig { + ServerType server_type = 1; + int32 threads = 2; + bool enable_ssl = 3; +} + +message ServerArgs { + oneof argtype { + ServerConfig setup = 1; + Mark mark = 2; + } +} + +message ServerStatus { + ServerStats stats = 1; + int32 port = 2; } -// Unary request. message SimpleRequest { - // Desired payload type in the response from the server. - // If response_type is RANDOM, server randomly chooses one from other formats. - optional PayloadType response_type = 1; + // Desired payload type in the response from the server. + // If response_type is RANDOM, server randomly chooses one from other formats. + PayloadType response_type = 1; - // Desired payload size in the response from the server. - // If response_type is COMPRESSABLE, this denotes the size before compression. - optional int32 response_size = 2; + // Desired payload size in the response from the server. + // If response_type is COMPRESSABLE, this denotes the size before compression. 
+ int32 response_size = 2; - // Optional input payload sent along with the request. - optional Payload payload = 3; - - // Whether SimpleResponse should include username. - optional bool fill_username = 4; - - // Whether SimpleResponse should include OAuth scope. - optional bool fill_oauth_scope = 5; + // Optional input payload sent along with the request. + Payload payload = 3; } -// Unary response, as configured by the request. message SimpleResponse { - // Payload to increase message size. - optional Payload payload = 1; - - // The user the request came from, for verifying authentication was - // successful when the client expected it. - optional string username = 2; - - // OAuth scope. - optional string oauth_scope = 3; + Payload payload = 1; } -// Client-streaming request. -message StreamingInputCallRequest { - // Optional input payload sent along with the request. - optional Payload payload = 1; - - // Not expecting any payload from the response. -} - -// Client-streaming response. -message StreamingInputCallResponse { - // Aggregated size of payloads received from the client. - optional int32 aggregated_payload_size = 1; -} - -// Configuration for a particular response. -message ResponseParameters { - // Desired payload sizes in responses from the server. - // If response_type is COMPRESSABLE, this denotes the size before compression. - optional int32 size = 1; - - // Desired interval between consecutive responses in the response stream in - // microseconds. - optional int32 interval_us = 2; -} - -// Server-streaming request. -message StreamingOutputCallRequest { - // Desired payload type in the response from the server. - // If response_type is RANDOM, the payload from each response in the stream - // might be of different types. This is to simulate a mixed type of payload - // stream. - optional PayloadType response_type = 1; - - // Configuration for each expected response message. 
- repeated ResponseParameters response_parameters = 2; - - // Optional input payload sent along with the request. - optional Payload payload = 3; -} - -// Server-streaming response, as configured by the request and parameters. -message StreamingOutputCallResponse { - // Payload to increase response size. - optional Payload payload = 1; -} - -// A simple service to test the various types of RPCs and experiment with -// performance with various types of payload. service TestService { - // One empty request followed by one empty response. - rpc EmptyCall(Empty) returns (Empty); + // One request followed by one response. + // The server returns the client payload as-is. + rpc UnaryCall(SimpleRequest) returns (SimpleResponse); - // One request followed by one response. - // The server returns the client payload as-is. - rpc UnaryCall(SimpleRequest) returns (SimpleResponse); - - // One request followed by a sequence of responses (streamed download). - // The server returns the payload with client desired type and sizes. - rpc StreamingOutputCall(StreamingOutputCallRequest) - returns (stream StreamingOutputCallResponse); - - // A sequence of requests followed by one response (streamed upload). - // The server returns the aggregated size of client payload as the result. - rpc StreamingInputCall(stream StreamingInputCallRequest) - returns (StreamingInputCallResponse); - - // A sequence of requests with each request served by the server immediately. - // As one request could lead to multiple responses, this interface - // demonstrates the idea of full duplexing. - rpc FullDuplexCall(stream StreamingOutputCallRequest) - returns (stream StreamingOutputCallResponse); - - // A sequence of requests followed by a sequence of responses. - // The server buffers all the client requests and then serves them in order. A - // stream of responses are returned to the client when the server starts with - // first request. 
- rpc HalfDuplexCall(stream StreamingOutputCallRequest) - returns (stream StreamingOutputCallResponse); + // One request followed by one response. + // The server returns the client payload as-is. + rpc StreamingCall(stream SimpleRequest) returns (stream SimpleResponse); +} + +service Worker { + // Start test with specified workload + rpc RunTest(stream ClientArgs) returns (stream ClientStatus); + // Start test with specified workload + rpc RunServer(stream ServerArgs) returns (stream ServerStatus); } diff --git a/Godeps/_workspace/src/google.golang.org/grpc/benchmark/server/main.go b/Godeps/_workspace/src/google.golang.org/grpc/benchmark/server/main.go index 747f9f3bf18..090f002fa1b 100644 --- a/Godeps/_workspace/src/google.golang.org/grpc/benchmark/server/main.go +++ b/Godeps/_workspace/src/google.golang.org/grpc/benchmark/server/main.go @@ -28,7 +28,7 @@ func main() { grpclog.Fatalf("Failed to serve: %v", err) } }() - addr, stopper := benchmark.StartServer() + addr, stopper := benchmark.StartServer(":0") // listen on all interfaces grpclog.Println("Server Address: ", addr) <-time.After(time.Duration(*duration) * time.Second) stopper() diff --git a/Godeps/_workspace/src/google.golang.org/grpc/benchmark/stats/histogram.go b/Godeps/_workspace/src/google.golang.org/grpc/benchmark/stats/histogram.go index 30381182033..727808c8bab 100644 --- a/Godeps/_workspace/src/google.golang.org/grpc/benchmark/stats/histogram.go +++ b/Godeps/_workspace/src/google.golang.org/grpc/benchmark/stats/histogram.go @@ -251,5 +251,5 @@ func (h *Histogram) findBucket(value int64) (int, error) { } min = b + 1 } - return 0, fmt.Errorf("no bucket for value: %f", value) + return 0, fmt.Errorf("no bucket for value: %d", value) } diff --git a/Godeps/_workspace/src/google.golang.org/grpc/call.go b/Godeps/_workspace/src/google.golang.org/grpc/call.go index e15e4f9d5b3..63b7966c160 100644 --- a/Godeps/_workspace/src/google.golang.org/grpc/call.go +++ 
b/Godeps/_workspace/src/google.golang.org/grpc/call.go @@ -35,8 +35,10 @@ package grpc import ( "io" + "time" "golang.org/x/net/context" + "golang.org/x/net/trace" "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" "google.golang.org/grpc/transport" @@ -97,11 +99,12 @@ type callInfo struct { failFast bool headerMD metadata.MD trailerMD metadata.MD + traceInfo traceInfo // in trace.go } // Invoke is called by the generated code. It sends the RPC request on the // wire and returns after response is received. -func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) error { +func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) (err error) { var c callInfo for _, o := range opts { if err := o.before(&c); err != nil { @@ -113,6 +116,23 @@ func Invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli o.after(&c) } }() + + if EnableTracing { + c.traceInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method) + defer c.traceInfo.tr.Finish() + c.traceInfo.firstLine.client = true + if deadline, ok := ctx.Deadline(); ok { + c.traceInfo.firstLine.deadline = deadline.Sub(time.Now()) + } + c.traceInfo.tr.LazyLog(&c.traceInfo.firstLine, false) + // TODO(dsymonds): Arrange for c.traceInfo.firstLine.remoteAddr to be set. 
+ defer func() { + if err != nil { + c.traceInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + c.traceInfo.tr.SetError() + } + }() + } callHdr := &transport.CallHdr{ Host: cc.authority, Method: method, @@ -143,6 +163,9 @@ func Invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli } return toRPCErr(err) } + if c.traceInfo.tr != nil { + c.traceInfo.tr.LazyLog(&payload{sent: true, msg: args}, true) + } stream, err = sendRequest(ctx, cc.dopts.codec, callHdr, t, args, topts) if err != nil { if _, ok := err.(transport.ConnectionError); ok { @@ -159,6 +182,9 @@ func Invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli if _, ok := lastErr.(transport.ConnectionError); ok { continue } + if c.traceInfo.tr != nil { + c.traceInfo.tr.LazyLog(&payload{sent: false, msg: reply}, true) + } t.CloseStream(stream, lastErr) if lastErr != nil { return toRPCErr(lastErr) diff --git a/Godeps/_workspace/src/google.golang.org/grpc/clientconn.go b/Godeps/_workspace/src/google.golang.org/grpc/clientconn.go index 25c9282ecdd..aacba5b9c79 100644 --- a/Godeps/_workspace/src/google.golang.org/grpc/clientconn.go +++ b/Godeps/_workspace/src/google.golang.org/grpc/clientconn.go @@ -35,6 +35,7 @@ package grpc import ( "errors" + "fmt" "net" "strings" "sync" @@ -49,19 +50,31 @@ import ( var ( // ErrUnspecTarget indicates that the target address is unspecified. ErrUnspecTarget = errors.New("grpc: target is unspecified") + // ErrNoTransportSecurity indicates that there is no transport security + // being set for ClientConn. Users should either set one or explicityly + // call WithInsecure DialOption to disable security. + ErrNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithInsecure() explicitly or set credentials)") + // ErrCredentialsMisuse indicates that users want to transmit security infomation + // (e.g., oauth2 token) which requires secure connection on an insecure + // connection. 
+ ErrCredentialsMisuse = errors.New("grpc: the credentials require transport level security (use grpc.WithTransportAuthenticator() to set)") // ErrClientConnClosing indicates that the operation is illegal because // the session is closing. ErrClientConnClosing = errors.New("grpc: the client connection is closing") // ErrClientConnTimeout indicates that the connection could not be // established or re-established within the specified timeout. ErrClientConnTimeout = errors.New("grpc: timed out trying to connect") + // minimum time to give a connection to complete + minConnectTimeout = 20 * time.Second ) // dialOptions configure a Dial call. dialOptions are set by the DialOption // values passed to Dial. type dialOptions struct { - codec Codec - copts transport.ConnectOptions + codec Codec + block bool + insecure bool + copts transport.ConnectOptions } // DialOption configures how we set up the connection. @@ -74,6 +87,21 @@ func WithCodec(c Codec) DialOption { } } +// WithBlock returns a DialOption which makes caller of Dial blocks until the underlying +// connection is up. Without this, Dial returns immediately and connecting the server +// happens in background. +func WithBlock() DialOption { + return func(o *dialOptions) { + o.block = true + } +} + +func WithInsecure() DialOption { + return func(o *dialOptions) { + o.insecure = true + } +} + // WithTransportCredentials returns a DialOption which configures a // connection level security credentials (e.g., TLS/SSL). func WithTransportCredentials(creds credentials.TransportAuthenticator) DialOption { @@ -104,19 +132,43 @@ func WithDialer(f func(addr string, timeout time.Duration) (net.Conn, error)) Di } } +// WithUserAgent returns a DialOption that specifies a user agent string for all the RPCs. +func WithUserAgent(s string) DialOption { + return func(o *dialOptions) { + o.copts.UserAgent = s + } +} + // Dial creates a client connection the given target. 
-// TODO(zhaoq): Have an option to make Dial return immediately without waiting -// for connection to complete. func Dial(target string, opts ...DialOption) (*ClientConn, error) { if target == "" { return nil, ErrUnspecTarget } cc := &ClientConn{ - target: target, + target: target, + shutdownChan: make(chan struct{}), } for _, opt := range opts { opt(&cc.dopts) } + if !cc.dopts.insecure { + var ok bool + for _, c := range cc.dopts.copts.AuthOptions { + if _, ok := c.(credentials.TransportAuthenticator); !ok { + continue + } + ok = true + } + if !ok { + return nil, ErrNoTransportSecurity + } + } else { + for _, c := range cc.dopts.copts.AuthOptions { + if c.RequireTransportSecurity() { + return nil, ErrCredentialsMisuse + } + } + } colonPos := strings.LastIndex(target, ":") if colonPos == -1 { colonPos = len(target) @@ -126,15 +178,61 @@ func Dial(target string, opts ...DialOption) (*ClientConn, error) { // Set the default codec. cc.dopts.codec = protoCodec{} } - if err := cc.resetTransport(false); err != nil { - return nil, err + cc.stateCV = sync.NewCond(&cc.mu) + if cc.dopts.block { + if err := cc.resetTransport(false); err != nil { + cc.Close() + return nil, err + } + // Start to monitor the error status of transport. + go cc.transportMonitor() + } else { + // Start a goroutine connecting to the server asynchronously. + go func() { + if err := cc.resetTransport(false); err != nil { + grpclog.Printf("Failed to dial %s: %v; please retry.", target, err) + cc.Close() + return + } + go cc.transportMonitor() + }() } - cc.shutdownChan = make(chan struct{}) - // Start to monitor the error status of transport. - go cc.transportMonitor() return cc, nil } +// ConnectivityState indicates the state of a client connection. +type ConnectivityState int + +const ( + // Idle indicates the ClientConn is idle. + Idle ConnectivityState = iota + // Connecting indicates the ClienConn is connecting. + Connecting + // Ready indicates the ClientConn is ready for work. 
+ Ready + // TransientFailure indicates the ClientConn has seen a failure but expects to recover. + TransientFailure + // Shutdown indicates the ClientConn has stated shutting down. + Shutdown +) + +func (s ConnectivityState) String() string { + switch s { + case Idle: + return "IDLE" + case Connecting: + return "CONNECTING" + case Ready: + return "READY" + case TransientFailure: + return "TRANSIENT_FAILURE" + case Shutdown: + return "SHUTDOWN" + default: + panic(fmt.Sprintf("unknown connectivity state: %d", s)) + } +} + // ClientConn represents a client connection to an RPC service. type ClientConn struct { target string @@ -142,12 +240,12 @@ type ClientConn struct { dopts dialOptions shutdownChan chan struct{} - mu sync.Mutex + mu sync.Mutex + state ConnectivityState + stateCV *sync.Cond // ready is closed and becomes nil when a new transport is up or failed // due to timeout. ready chan struct{} - // Indicates the ClientConn is under destruction. - closing bool // Every time a new transport is created, this is incremented by 1. Used // to avoid trying to recreate a transport while the new one is already // under construction. @@ -155,16 +253,59 @@ type ClientConn struct { transport transport.ClientTransport } +// State returns the connectivity state of the ClientConn +func (cc *ClientConn) State() ConnectivityState { + cc.mu.Lock() + defer cc.mu.Unlock() + return cc.state +} + +// WaitForStateChange blocks until the state changes to something other than the sourceState +// or timeout fires. It returns false if timeout fires and true otherwise. 
+func (cc *ClientConn) WaitForStateChange(timeout time.Duration, sourceState ConnectivityState) bool { + start := time.Now() + cc.mu.Lock() + defer cc.mu.Unlock() + if sourceState != cc.state { + return true + } + expired := timeout <= time.Since(start) + if expired { + return false + } + done := make(chan struct{}) + go func() { + select { + case <-time.After(timeout - time.Since(start)): + cc.mu.Lock() + expired = true + cc.stateCV.Broadcast() + cc.mu.Unlock() + case <-done: + } + }() + defer close(done) + for sourceState == cc.state { + cc.stateCV.Wait() + if expired { + return false + } + } + return true +} + func (cc *ClientConn) resetTransport(closeTransport bool) error { var retries int start := time.Now() for { cc.mu.Lock() + cc.state = Connecting + cc.stateCV.Broadcast() t := cc.transport ts := cc.transportSeq // Avoid wait() picking up a dying transport unnecessarily. cc.transportSeq = 0 - if cc.closing { + if cc.state == Shutdown { cc.mu.Unlock() return ErrClientConnClosing } @@ -185,9 +326,25 @@ func (cc *ClientConn) resetTransport(closeTransport bool) error { return ErrClientConnTimeout } } + sleepTime := backoff(retries) + timeout := sleepTime + if timeout < minConnectTimeout { + timeout = minConnectTimeout + } + if copts.Timeout == 0 || copts.Timeout > timeout { + copts.Timeout = timeout + } + connectTime := time.Now() newTransport, err := transport.NewClientTransport(cc.target, &copts) if err != nil { - sleepTime := backoff(retries) + cc.mu.Lock() + cc.state = TransientFailure + cc.stateCV.Broadcast() + cc.mu.Unlock() + sleepTime -= time.Since(connectTime) + if sleepTime < 0 { + sleepTime = 0 + } // Fail early before falling into sleep. if cc.dopts.copts.Timeout > 0 && cc.dopts.copts.Timeout < sleepTime+time.Since(start) { cc.Close() @@ -200,12 +357,14 @@ func (cc *ClientConn) resetTransport(closeTransport bool) error { continue } cc.mu.Lock() - if cc.closing { + if cc.state == Shutdown { // cc.Close() has been invoked. 
cc.mu.Unlock() newTransport.Close() return ErrClientConnClosing } + cc.state = Ready + cc.stateCV.Broadcast() cc.transport = newTransport cc.transportSeq = ts + 1 if cc.ready != nil { @@ -222,13 +381,17 @@ func (cc *ClientConn) resetTransport(closeTransport bool) error { func (cc *ClientConn) transportMonitor() { for { select { - // shutdownChan is needed to detect the channel teardown when + // shutdownChan is needed to detect the teardown when // the ClientConn is idle (i.e., no RPC in flight). case <-cc.shutdownChan: return case <-cc.transport.Error(): + cc.mu.Lock() + cc.state = TransientFailure + cc.stateCV.Broadcast() + cc.mu.Unlock() if err := cc.resetTransport(true); err != nil { - // The channel is closing. + // The ClientConn is closing. grpclog.Printf("grpc: ClientConn.transportMonitor exits due to: %v", err) return } @@ -244,7 +407,7 @@ func (cc *ClientConn) wait(ctx context.Context, ts int) (transport.ClientTranspo for { cc.mu.Lock() switch { - case cc.closing: + case cc.state == Shutdown: cc.mu.Unlock() return nil, 0, ErrClientConnClosing case ts < cc.transportSeq: @@ -276,10 +439,11 @@ func (cc *ClientConn) wait(ctx context.Context, ts int) (transport.ClientTranspo func (cc *ClientConn) Close() error { cc.mu.Lock() defer cc.mu.Unlock() - if cc.closing { + if cc.state == Shutdown { return ErrClientConnClosing } - cc.closing = true + cc.state = Shutdown + cc.stateCV.Broadcast() if cc.ready != nil { close(cc.ready) cc.ready = nil diff --git a/Godeps/_workspace/src/google.golang.org/grpc/codegen.sh b/Godeps/_workspace/src/google.golang.org/grpc/codegen.sh index 4e7a9e75d0c..b0094888429 100644 --- a/Godeps/_workspace/src/google.golang.org/grpc/codegen.sh +++ b/Godeps/_workspace/src/google.golang.org/grpc/codegen.sh @@ -8,7 +8,7 @@ # plugin at https://github.com/golang/protobuf (after 2015-02-20). If you have # not, please install them first. # -# We recommend running this script at $GOPATH or $GOPATH/src. 
+# We recommend running this script at $GOPATH/src. # # If this is not what you need, feel free to make your own scripts. Again, this # script is for demonstration purpose. diff --git a/Godeps/_workspace/src/google.golang.org/grpc/credentials/credentials.go b/Godeps/_workspace/src/google.golang.org/grpc/credentials/credentials.go index 9c5fff027e0..3cec7e49168 100644 --- a/Godeps/_workspace/src/google.golang.org/grpc/credentials/credentials.go +++ b/Godeps/_workspace/src/google.golang.org/grpc/credentials/credentials.go @@ -47,14 +47,11 @@ import ( "time" "golang.org/x/net/context" - "golang.org/x/oauth2" - "golang.org/x/oauth2/google" - "golang.org/x/oauth2/jwt" ) var ( // alpnProtoStr are the specified application level protocols for gRPC. - alpnProtoStr = []string{"h2-14", "h2-15", "h2-16"} + alpnProtoStr = []string{"h2"} ) // Credentials defines the common interface all supported credentials must @@ -63,11 +60,15 @@ type Credentials interface { // GetRequestMetadata gets the current request metadata, refreshing // tokens if required. This should be called by the transport layer on // each request, and the data should be populated in headers or other - // context. When supported by the underlying implementation, ctx can - // be used for timeout and cancellation. + // context. uri is the URI of the entry point for the request. When + // supported by the underlying implementation, ctx can be used for + // timeout and cancellation. // TODO(zhaoq): Define the set of the qualified keys instead of leaving // it as an arbitrary string. - GetRequestMetadata(ctx context.Context) (map[string]string, error) + GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) + // RequireTransportSecurity indicates whether the credentails requires + // transport security. 
+ RequireTransportSecurity() bool } // ProtocolInfo provides information regarding the gRPC wire protocol version, @@ -81,26 +82,57 @@ type ProtocolInfo struct { SecurityVersion string } +// AuthInfo defines the common interface for the auth information the users are interested in. +type AuthInfo interface { + AuthType() string +} + +type authInfoKey struct{} + +// NewContext creates a new context with authInfo attached. +func NewContext(ctx context.Context, authInfo AuthInfo) context.Context { + return context.WithValue(ctx, authInfoKey{}, authInfo) +} + +// FromContext returns the authInfo in ctx if it exists. +func FromContext(ctx context.Context) (authInfo AuthInfo, ok bool) { + authInfo, ok = ctx.Value(authInfoKey{}).(AuthInfo) + return +} + // TransportAuthenticator defines the common interface for all the live gRPC wire // protocols and supported transport security protocols (e.g., TLS, SSL). type TransportAuthenticator interface { // ClientHandshake does the authentication handshake specified by the corresponding - // authentication protocol on rawConn for clients. - ClientHandshake(addr string, rawConn net.Conn, timeout time.Duration) (net.Conn, error) - // ServerHandshake does the authentication handshake for servers. - ServerHandshake(rawConn net.Conn) (net.Conn, error) + // authentication protocol on rawConn for clients. It returns the authenticated + // connection and the corresponding auth information about the connection. + ClientHandshake(addr string, rawConn net.Conn, timeout time.Duration) (net.Conn, AuthInfo, error) + // ServerHandshake does the authentication handshake for servers. It returns + // the authenticated connection and the corresponding auth information about + // the connection. + ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) // Info provides the ProtocolInfo of this TransportAuthenticator. Info() ProtocolInfo Credentials } +// TLSInfo contains the auth information for a TLS authenticated connection. 
+// It implements the AuthInfo interface. +type TLSInfo struct { + state tls.ConnectionState +} + +func (t TLSInfo) AuthType() string { + return "tls" +} + // tlsCreds is the credentials required for authenticating a connection using TLS. type tlsCreds struct { // TLS configuration config tls.Config } -func (c *tlsCreds) Info() ProtocolInfo { +func (c tlsCreds) Info() ProtocolInfo { return ProtocolInfo{ SecurityProtocol: "tls", SecurityVersion: "1.2", @@ -109,17 +141,21 @@ func (c *tlsCreds) Info() ProtocolInfo { // GetRequestMetadata returns nil, nil since TLS credentials does not have // metadata. -func (c *tlsCreds) GetRequestMetadata(ctx context.Context) (map[string]string, error) { +func (c *tlsCreds) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { return nil, nil } +func (c *tlsCreds) RequireTransportSecurity() bool { + return true +} + type timeoutError struct{} func (timeoutError) Error() string { return "credentials: Dial timed out" } func (timeoutError) Timeout() bool { return true } func (timeoutError) Temporary() bool { return true } -func (c *tlsCreds) ClientHandshake(addr string, rawConn net.Conn, timeout time.Duration) (_ net.Conn, err error) { +func (c *tlsCreds) ClientHandshake(addr string, rawConn net.Conn, timeout time.Duration) (_ net.Conn, _ AuthInfo, err error) { // borrow some code from tls.DialWithDialer var errChannel chan error if timeout != 0 { @@ -146,18 +182,20 @@ func (c *tlsCreds) ClientHandshake(addr string, rawConn net.Conn, timeout time.D } if err != nil { rawConn.Close() - return nil, err + return nil, nil, err } - return conn, nil + // TODO(zhaoq): Omit the auth info for client now. It is more for + // information than anything else. 
+ return conn, nil, nil } -func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, error) { +func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) { conn := tls.Server(rawConn, &c.config) if err := conn.Handshake(); err != nil { rawConn.Close() - return nil, err + return nil, nil, err } - return conn, nil + return conn, TLSInfo{conn.ConnectionState()}, nil } // NewTLS uses c to construct a TransportAuthenticator based on TLS. @@ -199,72 +237,3 @@ func NewServerTLSFromFile(certFile, keyFile string) (TransportAuthenticator, err } return NewTLS(&tls.Config{Certificates: []tls.Certificate{cert}}), nil } - -// TokenSource supplies credentials from an oauth2.TokenSource. -type TokenSource struct { - oauth2.TokenSource -} - -// GetRequestMetadata gets the request metadata as a map from a TokenSource. -func (ts TokenSource) GetRequestMetadata(ctx context.Context) (map[string]string, error) { - token, err := ts.Token() - if err != nil { - return nil, err - } - return map[string]string{ - "authorization": token.TokenType + " " + token.AccessToken, - }, nil -} - -// NewComputeEngine constructs the credentials that fetches access tokens from -// Google Compute Engine (GCE)'s metadata server. It is only valid to use this -// if your program is running on a GCE instance. -// TODO(dsymonds): Deprecate and remove this. -func NewComputeEngine() Credentials { - return TokenSource{google.ComputeTokenSource("")} -} - -// serviceAccount represents credentials via JWT signing key. -type serviceAccount struct { - config *jwt.Config -} - -func (s serviceAccount) GetRequestMetadata(ctx context.Context) (map[string]string, error) { - token, err := s.config.TokenSource(ctx).Token() - if err != nil { - return nil, err - } - return map[string]string{ - "authorization": token.TokenType + " " + token.AccessToken, - }, nil -} - -// NewServiceAccountFromKey constructs the credentials using the JSON key slice -// from a Google Developers service account. 
-func NewServiceAccountFromKey(jsonKey []byte, scope ...string) (Credentials, error) { - config, err := google.JWTConfigFromJSON(jsonKey, scope...) - if err != nil { - return nil, err - } - return serviceAccount{config: config}, nil -} - -// NewServiceAccountFromFile constructs the credentials using the JSON key file -// of a Google Developers service account. -func NewServiceAccountFromFile(keyFile string, scope ...string) (Credentials, error) { - jsonKey, err := ioutil.ReadFile(keyFile) - if err != nil { - return nil, fmt.Errorf("credentials: failed to read the service account key file: %v", err) - } - return NewServiceAccountFromKey(jsonKey, scope...) -} - -// NewApplicationDefault returns "Application Default Credentials". For more -// detail, see https://developers.google.com/accounts/docs/application-default-credentials. -func NewApplicationDefault(ctx context.Context, scope ...string) (Credentials, error) { - t, err := google.DefaultTokenSource(ctx, scope...) - if err != nil { - return nil, err - } - return TokenSource{t}, nil -} diff --git a/Godeps/_workspace/src/google.golang.org/grpc/credentials/oauth/oauth.go b/Godeps/_workspace/src/google.golang.org/grpc/credentials/oauth/oauth.go new file mode 100644 index 00000000000..04943fdf03b --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/credentials/oauth/oauth.go @@ -0,0 +1,177 @@ +/* + * + * Copyright 2015, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. 
nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +// Package oauth implements gRPC credentials using OAuth. +package oauth + +import ( + "fmt" + "io/ioutil" + + "golang.org/x/net/context" + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" + "golang.org/x/oauth2/jwt" + "google.golang.org/grpc/credentials" +) + +// TokenSource supplies credentials from an oauth2.TokenSource. +type TokenSource struct { + oauth2.TokenSource +} + +// GetRequestMetadata gets the request metadata as a map from a TokenSource. 
+func (ts TokenSource) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { + token, err := ts.Token() + if err != nil { + return nil, err + } + return map[string]string{ + "authorization": token.TokenType + " " + token.AccessToken, + }, nil +} + +func (ts TokenSource) RequireTransportSecurity() bool { + return true +} + +type jwtAccess struct { + jsonKey []byte +} + +func NewJWTAccessFromFile(keyFile string) (credentials.Credentials, error) { + jsonKey, err := ioutil.ReadFile(keyFile) + if err != nil { + return nil, fmt.Errorf("credentials: failed to read the service account key file: %v", err) + } + return NewJWTAccessFromKey(jsonKey) +} + +func NewJWTAccessFromKey(jsonKey []byte) (credentials.Credentials, error) { + return jwtAccess{jsonKey}, nil +} + +func (j jwtAccess) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { + ts, err := google.JWTAccessTokenSourceFromJSON(j.jsonKey, uri[0]) + if err != nil { + return nil, err + } + token, err := ts.Token() + if err != nil { + return nil, err + } + return map[string]string{ + "authorization": token.TokenType + " " + token.AccessToken, + }, nil +} + +func (j jwtAccess) RequireTransportSecurity() bool { + return true +} + +// oauthAccess supplies credentials from a given token. +type oauthAccess struct { + token oauth2.Token +} + +// NewOauthAccess constructs the credentials using a given token. +func NewOauthAccess(token *oauth2.Token) credentials.Credentials { + return oauthAccess{token: *token} +} + +func (oa oauthAccess) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { + return map[string]string{ + "authorization": oa.token.TokenType + " " + oa.token.AccessToken, + }, nil +} + +func (oa oauthAccess) RequireTransportSecurity() bool { + return true +} + +// NewComputeEngine constructs the credentials that fetches access tokens from +// Google Compute Engine (GCE)'s metadata server. 
It is only valid to use this +// if your program is running on a GCE instance. +// TODO(dsymonds): Deprecate and remove this. +func NewComputeEngine() credentials.Credentials { + return TokenSource{google.ComputeTokenSource("")} +} + +// serviceAccount represents credentials via JWT signing key. +type serviceAccount struct { + config *jwt.Config +} + +func (s serviceAccount) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { + token, err := s.config.TokenSource(ctx).Token() + if err != nil { + return nil, err + } + return map[string]string{ + "authorization": token.TokenType + " " + token.AccessToken, + }, nil +} + +func (s serviceAccount) RequireTransportSecurity() bool { + return true +} + +// NewServiceAccountFromKey constructs the credentials using the JSON key slice +// from a Google Developers service account. +func NewServiceAccountFromKey(jsonKey []byte, scope ...string) (credentials.Credentials, error) { + config, err := google.JWTConfigFromJSON(jsonKey, scope...) + if err != nil { + return nil, err + } + return serviceAccount{config: config}, nil +} + +// NewServiceAccountFromFile constructs the credentials using the JSON key file +// of a Google Developers service account. +func NewServiceAccountFromFile(keyFile string, scope ...string) (credentials.Credentials, error) { + jsonKey, err := ioutil.ReadFile(keyFile) + if err != nil { + return nil, fmt.Errorf("credentials: failed to read the service account key file: %v", err) + } + return NewServiceAccountFromKey(jsonKey, scope...) +} + +// NewApplicationDefault returns "Application Default Credentials". For more +// detail, see https://developers.google.com/accounts/docs/application-default-credentials. +func NewApplicationDefault(ctx context.Context, scope ...string) (credentials.Credentials, error) { + t, err := google.DefaultTokenSource(ctx, scope...) 
+ if err != nil { + return nil, err + } + return TokenSource{t}, nil +} diff --git a/Godeps/_workspace/src/google.golang.org/grpc/examples/README.md b/Godeps/_workspace/src/google.golang.org/grpc/examples/README.md new file mode 100644 index 00000000000..e5c03c38e0d --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/examples/README.md @@ -0,0 +1,53 @@ +gRPC in 3 minutes (Go) +====================== + +BACKGROUND +------------- +For this sample, we've already generated the server and client stubs from [helloworld.proto](examples/helloworld/proto/helloworld.proto). + +PREREQUISITES +------------- + +- This requires Go 1.4 +- Requires that [GOPATH is set](https://golang.org/doc/code.html#GOPATH) +```sh +$ go help gopath +$ # ensure the PATH contains $GOPATH/bin +$ export PATH=$PATH:$GOPATH/bin +``` + +INSTALL +------- + +```sh +$ go get -u github.com/grpc/grpc-go/examples/greeter_client +$ go get -u github.com/grpc/grpc-go/examples/greeter_server +``` + +TRY IT! +------- + +- Run the server +```sh +$ greeter_server & +``` + +- Run the client +```sh +$ greeter_client +``` + +OPTIONAL - Rebuilding the generated code +---------------------------------------- + +1 First [install protoc](https://github.com/google/protobuf/blob/master/INSTALL.txt) + - For now, this needs to be installed from source + - This is will change once proto3 is officially released + +2 Install the protoc Go plugin. 
+```sh +$ go get -a github.com/golang/protobuf/protoc-gen-go +$ +$ # from this dir; invoke protoc +$ protoc -I ./helloworld/proto/ ./helloworld/proto/helloworld.proto --go_out=plugins=grpc:helloworld +``` diff --git a/Godeps/_workspace/src/google.golang.org/grpc/examples/gotutorial.md b/Godeps/_workspace/src/google.golang.org/grpc/examples/gotutorial.md new file mode 100644 index 00000000000..8df15a4a4e3 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/examples/gotutorial.md @@ -0,0 +1,431 @@ +#gRPC Basics: Go + +This tutorial provides a basic Go programmer's introduction to working with gRPC. By walking through this example you'll learn how to: + +- Define a service in a .proto file. +- Generate server and client code using the protocol buffer compiler. +- Use the Go gRPC API to write a simple client and server for your service. + +It assumes that you have read the [Getting started](https://github.com/grpc/grpc/tree/master/examples) guide and are familiar with [protocol buffers] (https://developers.google.com/protocol-buffers/docs/overview). Note that the example in this tutorial uses the proto3 version of the protocol buffers language, which is currently in alpha release:you can find out more in the [proto3 language guide](https://developers.google.com/protocol-buffers/docs/proto3) and see the [release notes](https://github.com/google/protobuf/releases) for the new version in the protocol buffers Github repository. + +This isn't a comprehensive guide to using gRPC in Go: more reference documentation is coming soon. + +## Why use gRPC? + +Our example is a simple route mapping application that lets clients get information about features on their route, create a summary of their route, and exchange route information such as traffic updates with the server and other clients. 
+ +With gRPC we can define our service once in a .proto file and implement clients and servers in any of gRPC's supported languages, which in turn can be run in environments ranging from servers inside Google to your own tablet - all the complexity of communication between different languages and environments is handled for you by gRPC. We also get all the advantages of working with protocol buffers, including efficient serialization, a simple IDL, and easy interface updating. + +## Example code and setup + +The example code for our tutorial is in [grpc/grpc-go/examples/route_guide](https://github.com/grpc/grpc-go/tree/master/examples/route_guide). To download the example, clone the `grpc-go` repository by running the following command: +```shell +$ go get google.golang.org/grpc +``` + +Then change your current directory to `grpc-go/examples/route_guide`: +```shell +$ cd $GOPATH/src/google.golang.org/grpc/examples/route_guide +``` + +You also should have the relevant tools installed to generate the server and client interface code - if you don't already, follow the setup instructions in [the Go quick start guide](examples/). + + +## Defining the service + +Our first step (as you'll know from [Getting started](https://github.com/grpc/grpc/tree/master/examples)) is to define the gRPC *service* and the method *request* and *response* types using [protocol buffers] (https://developers.google.com/protocol-buffers/docs/overview). You can see the complete .proto file in [`examples/route_guide/proto/route_guide.proto`](examples/route_guide/proto/route_guide.proto). + +To define a service, you specify a named `service` in your .proto file: + +```proto +service RouteGuide { + ... +} +``` + +Then you define `rpc` methods inside your service definition, specifying their request and response types. 
gRPC lets you define four kinds of service method, all of which are used in the `RouteGuide` service: + +- A *simple RPC* where the client sends a request to the server using the stub and waits for a response to come back, just like a normal function call. +```proto + // Obtains the feature at a given position. + rpc GetFeature(Point) returns (Feature) {} +``` + +- A *server-side streaming RPC* where the client sends a request to the server and gets a stream to read a sequence of messages back. The client reads from the returned stream until there are no more messages. As you can see in our example, you specify a server-side streaming method by placing the `stream` keyword before the *response* type. +```proto + // Obtains the Features available within the given Rectangle. Results are + // streamed rather than returned at once (e.g. in a response message with a + // repeated field), as the rectangle may cover a large area and contain a + // huge number of features. + rpc ListFeatures(Rectangle) returns (stream Feature) {} +``` + +- A *client-side streaming RPC* where the client writes a sequence of messages and sends them to the server, again using a provided stream. Once the client has finished writing the messages, it waits for the server to read them all and return its response. You specify a client-side streaming method by placing the `stream` keyword before the *request* type. +```proto + // Accepts a stream of Points on a route being traversed, returning a + // RouteSummary when traversal is completed. + rpc RecordRoute(stream Point) returns (RouteSummary) {} +``` + +- A *bidirectional streaming RPC* where both sides send a sequence of messages using a read-write stream. 
The two streams operate independently, so clients and servers can read and write in whatever order they like: for example, the server could wait to receive all the client messages before writing its responses, or it could alternately read a message then write a message, or some other combination of reads and writes. The order of messages in each stream is preserved. You specify this type of method by placing the `stream` keyword before both the request and the response. +```proto + // Accepts a stream of RouteNotes sent while a route is being traversed, + // while receiving other RouteNotes (e.g. from other users). + rpc RouteChat(stream RouteNote) returns (stream RouteNote) {} +``` + +Our .proto file also contains protocol buffer message type definitions for all the request and response types used in our service methods - for example, here's the `Point` message type: +```proto +// Points are represented as latitude-longitude pairs in the E7 representation +// (degrees multiplied by 10**7 and rounded to the nearest integer). +// Latitudes should be in the range +/- 90 degrees and longitude should be in +// the range +/- 180 degrees (inclusive). +message Point { + int32 latitude = 1; + int32 longitude = 2; +} +``` + + +## Generating client and server code + +Next we need to generate the gRPC client and server interfaces from our .proto service definition. We do this using the protocol buffer compiler `protoc` with a special gRPC Go plugin. + +For simplicity, we've provided a [bash script](https://github.com/grpc/grpc-go/blob/master/codegen.sh) that runs `protoc` for you with the appropriate plugin, input, and output (if you want to run this by yourself, make sure you've installed protoc and followed the gRPC-Go [installation instructions](https://github.com/grpc/grpc-go/blob/master/README.md) first): + +```shell +$ codegen.sh route_guide.proto +``` + +which actually runs: + +```shell +$ protoc --go_out=plugins=grpc:. 
route_guide.proto +``` + +Running this command generates the following file in your current directory: +- `route_guide.pb.go` + +This contains: +- All the protocol buffer code to populate, serialize, and retrieve our request and response message types +- An interface type (or *stub*) for clients to call with the methods defined in the `RouteGuide` service. +- An interface type for servers to implement, also with the methods defined in the `RouteGuide` service. + + + +## Creating the server + +First let's look at how we create a `RouteGuide` server. If you're only interested in creating gRPC clients, you can skip this section and go straight to [Creating the client](#client) (though you might find it interesting anyway!). + +There are two parts to making our `RouteGuide` service do its job: +- Implementing the service interface generated from our service definition: doing the actual "work" of our service. +- Running a gRPC server to listen for requests from clients and dispatch them to the right service implementation. + +You can find our example `RouteGuide` server in [grpc-go/examples/route_guide/server/server.go](https://github.com/grpc/grpc-go/tree/master/examples/route_guide/server/server.go). Let's take a closer look at how it works. + +### Implementing RouteGuide + +As you can see, our server has a `routeGuideServer` struct type that implements the generated `RouteGuideServer` interface: + +```go +type routeGuideServer struct { + ... +} +... + +func (s *routeGuideServer) GetFeature(ctx context.Context, point *pb.Point) (*pb.Feature, error) { + ... +} +... + +func (s *routeGuideServer) ListFeatures(rect *pb.Rectangle, stream pb.RouteGuide_ListFeaturesServer) error { + ... +} +... + +func (s *routeGuideServer) RecordRoute(stream pb.RouteGuide_RecordRouteServer) error { + ... +} +... + +func (s *routeGuideServer) RouteChat(stream pb.RouteGuide_RouteChatServer) error { + ... +} +... +``` + +#### Simple RPC +`routeGuideServer` implements all our service methods. 
Let's look at the simplest type first, `GetFeature`, which just gets a `Point` from the client and returns the corresponding feature information from its database in a `Feature`. + +```go +func (s *routeGuideServer) GetFeature(ctx context.Context, point *pb.Point) (*pb.Feature, error) { + for _, feature := range s.savedFeatures { + if proto.Equal(feature.Location, point) { + return feature, nil + } + } + // No feature was found, return an unnamed feature + return &pb.Feature{"", point}, nil +} +``` + +The method is passed a context object for the RPC and the client's `Point` protocol buffer request. It returns a `Feature` protocol buffer object with the response information and an `error`. In the method we populate the `Feature` with the appropriate information, and then `return` it along with an `nil` error to tell gRPC that we've finished dealing with the RPC and that the `Feature` can be returned to the client. + +#### Server-side streaming RPC +Now let's look at one of our streaming RPCs. `ListFeatures` is a server-side streaming RPC, so we need to send back multiple `Feature`s to our client. + +```go +func (s *routeGuideServer) ListFeatures(rect *pb.Rectangle, stream pb.RouteGuide_ListFeaturesServer) error { + for _, feature := range s.savedFeatures { + if inRange(feature.Location, rect) { + if err := stream.Send(feature); err != nil { + return err + } + } + } + return nil +} +``` + +As you can see, instead of getting simple request and response objects in our method parameters, this time we get a request object (the `Rectangle` in which our client wants to find `Feature`s) and a special `RouteGuide_ListFeaturesServer` object to write our responses. + +In the method, we populate as many `Feature` objects as we need to return, writing them to the `RouteGuide_ListFeaturesServer` using its `Send()` method. Finally, as in our simple RPC, we return a `nil` error to tell gRPC that we've finished writing responses. 
Should any error happen in this call, we return a non-`nil` error; the gRPC layer will translate it into an appropriate RPC status to be sent on the wire. + +#### Client-side streaming RPC +Now let's look at something a little more complicated: the client-side streaming method `RecordRoute`, where we get a stream of `Point`s from the client and return a single `RouteSummary` with information about their trip. As you can see, this time the method doesn't have a request parameter at all. Instead, it gets a `RouteGuide_RecordRouteServer` stream, which the server can use to both read *and* write messages - it can receive client messages using its `Recv()` method and return its single response using its `SendAndClose()` method. + +```go +func (s *routeGuideServer) RecordRoute(stream pb.RouteGuide_RecordRouteServer) error { + var pointCount, featureCount, distance int32 + var lastPoint *pb.Point + startTime := time.Now() + for { + point, err := stream.Recv() + if err == io.EOF { + endTime := time.Now() + return stream.SendAndClose(&pb.RouteSummary{ + PointCount: pointCount, + FeatureCount: featureCount, + Distance: distance, + ElapsedTime: int32(endTime.Sub(startTime).Seconds()), + }) + } + if err != nil { + return err + } + pointCount++ + for _, feature := range s.savedFeatures { + if proto.Equal(feature.Location, point) { + featureCount++ + } + } + if lastPoint != nil { + distance += calcDistance(lastPoint, point) + } + lastPoint = point + } +} +``` + +In the method body we use the `RouteGuide_RecordRouteServer`s `Recv()` method to repeatedly read in our client's requests to a request object (in this case a `Point`) until there are no more messages: the server needs to check the the error returned from `Read()` after each call. If this is `nil`, the stream is still good and it can continue reading; if it's `io.EOF` the message stream has ended and the server can return its `RouteSummary`. 
If it has any other value, we return the error "as is" so that it'll be translated to an RPC status by the gRPC layer. + +#### Bidirectional streaming RPC +Finally, let's look at our bidirectional streaming RPC `RouteChat()`. + +```go +func (s *routeGuideServer) RouteChat(stream pb.RouteGuide_RouteChatServer) error { + for { + in, err := stream.Recv() + if err == io.EOF { + return nil + } + if err != nil { + return err + } + key := serialize(in.Location) + ... // look for notes to be sent to client + for _, note := range s.routeNotes[key] { + if err := stream.Send(note); err != nil { + return err + } + } + } +} +``` + +This time we get a `RouteGuide_RouteChatServer` stream that, as in our client-side streaming example, can be used to read and write messages. However, this time we return values via our method's stream while the client is still writing messages to *their* message stream. + +The syntax for reading and writing here is very similar to our client-streaming method, except the server uses the stream's `Send()` method rather than `SendAndClose()` because it's writing multiple responses. Although each side will always get the other's messages in the order they were written, both the client and server can read and write in any order — the streams operate completely independently. + +### Starting the server + +Once we've implemented all our methods, we also need to start up a gRPC server so that clients can actually use our service. The following snippet shows how we do this for our `RouteGuide` service: + +```go +flag.Parse() +lis, err := net.Listen("tcp", fmt.Sprintf(":%d", *port)) +if err != nil { + log.Fatalf("failed to listen: %v", err) +} +grpcServer := grpc.NewServer() +pb.RegisterRouteGuideServer(grpcServer, &routeGuideServer{}) +... // determine whether to use TLS +grpcServer.Serve(lis) +``` +To build and start a server, we: + +1. 
Specify the port we want to use to listen for client requests using `lis, err := net.Listen("tcp", fmt.Sprintf(":%d", *port))`. +2. Create an instance of the gRPC server using `grpc.NewServer()`. +3. Register our service implementation with the gRPC server. +4. Call `Serve()` on the server with our port details to do a blocking wait until the process is killed or `Stop()` is called. + + +## Creating the client + +In this section, we'll look at creating a Go client for our `RouteGuide` service. You can see our complete example client code in [grpc-go/examples/route_guide/client/client.go](https://github.com/grpc/grpc-go/tree/master/examples/route_guide/client/client.go). + +### Creating a stub + +To call service methods, we first need to create a gRPC *channel* to communicate with the server. We create this by passing the server address and port number to `grpc.Dial()` as follows: + +```go +conn, err := grpc.Dial(*serverAddr) +if err != nil { + ... +} +defer conn.Close() +``` + +You can use `DialOptions` to set the auth credentials (e.g., TLS, GCE credentials, JWT credentials) in `grpc.Dial` if the service you request requires that - however, we don't need to do this for our `RouteGuide` service. + +Once the gRPC *channel* is setup, we need a client *stub* to perform RPCs. We get this using the `NewRouteGuideClient` method provided in the `pb` package we generated from our .proto. + +```go +client := pb.NewRouteGuideClient(conn) +``` + +### Calling service methods + +Now let's look at how we call our service methods. Note that in gRPC-Go, RPCs operate in a blocking/synchronous mode, which means that the RPC call waits for the server to respond, and will either return a response or an error. + +#### Simple RPC + +Calling the simple RPC `GetFeature` is nearly as straightforward as calling a local method. + +```go +feature, err := client.GetFeature(context.Background(), &pb.Point{409146138, -746188906}) +if err != nil { + ... 
+} +``` + +As you can see, we call the method on the stub we got earlier. In our method parameters we create and populate a request protocol buffer object (in our case `Point`). We also pass a `context.Context` object which lets us change our RPC's behaviour if necessary, such as time-out/cancel an RPC in flight. If the call doesn't return an error, then we can read the response information from the server from the first return value. + +```go +log.Println(feature) +``` + +#### Server-side streaming RPC + +Here's where we call the server-side streaming method `ListFeatures`, which returns a stream of geographical `Feature`s. If you've already read [Creating the server](#server) some of this may look very familiar - streaming RPCs are implemented in a similar way on both sides. + +```go +rect := &pb.Rectangle{ ... } // initialize a pb.Rectangle +stream, err := client.ListFeatures(context.Background(), rect) +if err != nil { + ... +} +for { + feature, err := stream.Recv() + if err == io.EOF { + break + } + if err != nil { + log.Fatalf("%v.ListFeatures(_) = _, %v", client, err) + } + log.Println(feature) +} +``` + +As in the simple RPC, we pass the method a context and a request. However, instead of getting a response object back, we get back an instance of `RouteGuide_ListFeaturesClient`. The client can use the `RouteGuide_ListFeaturesClient` stream to read the server's responses. + +We use the `RouteGuide_ListFeaturesClient`'s `Recv()` method to repeatedly read in the server's responses to a response protocol buffer object (in this case a `Feature`) until there are no more messages: the client needs to check the error `err` returned from `Recv()` after each call. If `nil`, the stream is still good and it can continue reading; if it's `io.EOF` then the message stream has ended; otherwise there must be an RPC error, which is passed over through `err`. 
+ +#### Client-side streaming RPC + +The client-side streaming method `RecordRoute` is similar to the server-side method, except that we only pass the method a context and get a `RouteGuide_RecordRouteClient` stream back, which we can use to both write *and* read messages. + +```go +// Create a random number of random points +r := rand.New(rand.NewSource(time.Now().UnixNano())) +pointCount := int(r.Int31n(100)) + 2 // Traverse at least two points +var points []*pb.Point +for i := 0; i < pointCount; i++ { + points = append(points, randomPoint(r)) +} +log.Printf("Traversing %d points.", len(points)) +stream, err := client.RecordRoute(context.Background()) +if err != nil { + log.Fatalf("%v.RecordRoute(_) = _, %v", client, err) +} +for _, point := range points { + if err := stream.Send(point); err != nil { + log.Fatalf("%v.Send(%v) = %v", stream, point, err) + } +} +reply, err := stream.CloseAndRecv() +if err != nil { + log.Fatalf("%v.CloseAndRecv() got error %v, want %v", stream, err, nil) +} +log.Printf("Route summary: %v", reply) +``` + +The `RouteGuide_RecordRouteClient` has a `Send()` method that we can use to send requests to the server. Once we've finished writing our client's requests to the stream using `Send()`, we need to call `CloseAndRecv()` on the stream to let gRPC know that we've finished writing and are expecting to receive a response. We get our RPC status from the `err` returned from `CloseAndRecv()`. If the status is `nil`, then the first return value from `CloseAndRecv()` will be a valid server response. + +#### Bidirectional streaming RPC + +Finally, let's look at our bidirectional streaming RPC `RouteChat()`. As in the case of `RecordRoute`, we only pass the method a context object and get back a stream that we can use to both write and read messages. However, this time we return values via our method's stream while the server is still writing messages to *their* message stream. 
+ +```go +stream, err := client.RouteChat(context.Background()) +waitc := make(chan struct{}) +go func() { + for { + in, err := stream.Recv() + if err == io.EOF { + // read done. + close(waitc) + return + } + if err != nil { + log.Fatalf("Failed to receive a note : %v", err) + } + log.Printf("Got message %s at point(%d, %d)", in.Message, in.Location.Latitude, in.Location.Longitude) + } +}() +for _, note := range notes { + if err := stream.Send(note); err != nil { + log.Fatalf("Failed to send a note: %v", err) + } +} +stream.CloseSend() +<-waitc +``` + +The syntax for reading and writing here is very similar to our client-side streaming method, except we use the stream's `CloseSend()` method once we've finished our call. Although each side will always get the other's messages in the order they were written, both the client and server can read and write in any order — the streams operate completely independently. + +## Try it out! + +To compile and run the server, assuming you are in the folder +`$GOPATH/src/google.golang.org/grpc/examples/route_guide`, simply: + +```sh +$ go run server/server.go +``` + +Likewise, to run the client: + +```sh +$ go run client/client.go +``` + diff --git a/Godeps/_workspace/src/google.golang.org/grpc/examples/helloworld/greeter_client/main.go b/Godeps/_workspace/src/google.golang.org/grpc/examples/helloworld/greeter_client/main.go new file mode 100644 index 00000000000..1e02ab154da --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/examples/helloworld/greeter_client/main.go @@ -0,0 +1,69 @@ +/* + * + * Copyright 2015, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +package main + +import ( + "log" + "os" + + pb "google.golang.org/grpc/examples/helloworld/helloworld" + "golang.org/x/net/context" + "google.golang.org/grpc" +) + +const ( + address = "localhost:50051" + defaultName = "world" +) + +func main() { + // Set up a connection to the server. + conn, err := grpc.Dial(address, grpc.WithInsecure()) + if err != nil { + log.Fatalf("did not connect: %v", err) + } + defer conn.Close() + c := pb.NewGreeterClient(conn) + + // Contact the server and print out its response. 
+ name := defaultName + if len(os.Args) > 1 { + name = os.Args[1] + } + r, err := c.SayHello(context.Background(), &pb.HelloRequest{Name: name}) + if err != nil { + log.Fatalf("could not greet: %v", err) + } + log.Printf("Greeting: %s", r.Message) +} diff --git a/Godeps/_workspace/src/google.golang.org/grpc/examples/helloworld/greeter_server/main.go b/Godeps/_workspace/src/google.golang.org/grpc/examples/helloworld/greeter_server/main.go new file mode 100644 index 00000000000..ba985df4c24 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/examples/helloworld/greeter_server/main.go @@ -0,0 +1,65 @@ +/* + * + * Copyright 2015, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+package main
+
+import (
+	"log"
+	"net"
+
+	pb "google.golang.org/grpc/examples/helloworld/helloworld"
+	"golang.org/x/net/context"
+	"google.golang.org/grpc"
+)
+
+const (
+	port = ":50051"
+)
+
+// server is used to implement helloworld.GreeterServer.
+type server struct{}
+
+// SayHello implements helloworld.GreeterServer
+func (s *server) SayHello(ctx context.Context, in *pb.HelloRequest) (*pb.HelloReply, error) {
+	return &pb.HelloReply{Message: "Hello " + in.Name}, nil
+}
+
+func main() {
+	lis, err := net.Listen("tcp", port)
+	if err != nil {
+		log.Fatalf("failed to listen: %v", err)
+	}
+	s := grpc.NewServer()
+	pb.RegisterGreeterServer(s, &server{})
+	s.Serve(lis)
+}
diff --git a/Godeps/_workspace/src/google.golang.org/grpc/examples/helloworld/helloworld/helloworld.pb.go b/Godeps/_workspace/src/google.golang.org/grpc/examples/helloworld/helloworld/helloworld.pb.go
new file mode 100644
index 00000000000..1ff931a3844
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/grpc/examples/helloworld/helloworld/helloworld.pb.go
@@ -0,0 +1,109 @@
+// Code generated by protoc-gen-go.
+// source: helloworld.proto
+// DO NOT EDIT!
+
+/*
+Package helloworld is a generated protocol buffer package.
+ +It is generated from these files: + helloworld.proto + +It has these top-level messages: + HelloRequest + HelloReply +*/ +package helloworld + +import proto "github.com/golang/protobuf/proto" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal + +// The request message containing the user's name. +type HelloRequest struct { + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *HelloRequest) Reset() { *m = HelloRequest{} } +func (m *HelloRequest) String() string { return proto.CompactTextString(m) } +func (*HelloRequest) ProtoMessage() {} + +// The response message containing the greetings +type HelloReply struct { + Message string `protobuf:"bytes,1,opt,name=message" json:"message,omitempty"` +} + +func (m *HelloReply) Reset() { *m = HelloReply{} } +func (m *HelloReply) String() string { return proto.CompactTextString(m) } +func (*HelloReply) ProtoMessage() {} + +func init() { +} + +// Client API for Greeter service + +type GreeterClient interface { + // Sends a greeting + SayHello(ctx context.Context, in *HelloRequest, opts ...grpc.CallOption) (*HelloReply, error) +} + +type greeterClient struct { + cc *grpc.ClientConn +} + +func NewGreeterClient(cc *grpc.ClientConn) GreeterClient { + return &greeterClient{cc} +} + +func (c *greeterClient) SayHello(ctx context.Context, in *HelloRequest, opts ...grpc.CallOption) (*HelloReply, error) { + out := new(HelloReply) + err := grpc.Invoke(ctx, "/helloworld.Greeter/SayHello", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Greeter service + +type GreeterServer interface { + // Sends a greeting + SayHello(context.Context, *HelloRequest) (*HelloReply, error) +} + +func RegisterGreeterServer(s *grpc.Server, srv GreeterServer) { + s.RegisterService(&_Greeter_serviceDesc, srv) +} + +func _Greeter_SayHello_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) { + in := new(HelloRequest) + if err := codec.Unmarshal(buf, in); err != nil { + return nil, err + } + out, err := srv.(GreeterServer).SayHello(ctx, in) + if err != nil { + return nil, err + } + return out, nil +} + +var _Greeter_serviceDesc = grpc.ServiceDesc{ + ServiceName: "helloworld.Greeter", + HandlerType: (*GreeterServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "SayHello", + Handler: _Greeter_SayHello_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, +} diff --git a/Godeps/_workspace/src/google.golang.org/grpc/examples/helloworld/helloworld/helloworld.proto b/Godeps/_workspace/src/google.golang.org/grpc/examples/helloworld/helloworld/helloworld.proto new file mode 100644 index 00000000000..7d58870a708 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/examples/helloworld/helloworld/helloworld.proto @@ -0,0 +1,51 @@ +// Copyright 2015, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. 
nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +option java_package = "io.grpc.examples"; +option objc_class_prefix = "HLW"; + +package helloworld; + +// The greeting service definition. +service Greeter { + // Sends a greeting + rpc SayHello (HelloRequest) returns (HelloReply) {} +} + +// The request message containing the user's name. +message HelloRequest { + string name = 1; +} + +// The response message containing the greetings +message HelloReply { + string message = 1; +} diff --git a/Godeps/_workspace/src/google.golang.org/grpc/examples/route_guide/README.md b/Godeps/_workspace/src/google.golang.org/grpc/examples/route_guide/README.md index 02a43f1976d..7571621dae7 100644 --- a/Godeps/_workspace/src/google.golang.org/grpc/examples/route_guide/README.md +++ b/Godeps/_workspace/src/google.golang.org/grpc/examples/route_guide/README.md @@ -2,7 +2,7 @@ The route guide server and client demonstrate how to use grpc go libraries to perform unary, client streaming, server streaming and full duplex RPCs. 
-Please refer to [Getting Started Guide for Go] (https://github.com/grpc/grpc-common/blob/master/go/gotutorial.md) for more information.
+Please refer to [Getting Started Guide for Go](examples/gotutorial.md) for more information.
 
 See the definition of the route guide service in proto/route_guide.proto.
 
diff --git a/Godeps/_workspace/src/google.golang.org/grpc/examples/route_guide/client/client.go b/Godeps/_workspace/src/google.golang.org/grpc/examples/route_guide/client/client.go
index 51c6c1206eb..a96c0302ced 100644
--- a/Godeps/_workspace/src/google.golang.org/grpc/examples/route_guide/client/client.go
+++ b/Godeps/_workspace/src/google.golang.org/grpc/examples/route_guide/client/client.go
@@ -46,7 +46,7 @@ import (
 	"golang.org/x/net/context"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/credentials"
-	pb "google.golang.org/grpc/examples/route_guide/proto"
+	pb "google.golang.org/grpc/examples/route_guide/routeguide"
 	"google.golang.org/grpc/grpclog"
 )
 
@@ -175,6 +175,8 @@ func main() {
 			creds = credentials.NewClientTLSFromCert(nil, sn)
 		}
 		opts = append(opts, grpc.WithTransportCredentials(creds))
+	} else {
+		opts = append(opts, grpc.WithInsecure())
 	}
 	conn, err := grpc.Dial(*serverAddr, opts...)
if err != nil { diff --git a/Godeps/_workspace/src/google.golang.org/grpc/examples/route_guide/proto/route_guide.pb.go b/Godeps/_workspace/src/google.golang.org/grpc/examples/route_guide/routeguide/route_guide.pb.go similarity index 99% rename from Godeps/_workspace/src/google.golang.org/grpc/examples/route_guide/proto/route_guide.pb.go rename to Godeps/_workspace/src/google.golang.org/grpc/examples/route_guide/routeguide/route_guide.pb.go index 5851e023f9b..fcf5c7484ff 100644 --- a/Godeps/_workspace/src/google.golang.org/grpc/examples/route_guide/proto/route_guide.pb.go +++ b/Godeps/_workspace/src/google.golang.org/grpc/examples/route_guide/routeguide/route_guide.pb.go @@ -15,7 +15,7 @@ It has these top-level messages: RouteNote RouteSummary */ -package proto +package routeguide import proto1 "github.com/golang/protobuf/proto" diff --git a/Godeps/_workspace/src/google.golang.org/grpc/examples/route_guide/proto/route_guide.proto b/Godeps/_workspace/src/google.golang.org/grpc/examples/route_guide/routeguide/route_guide.proto similarity index 99% rename from Godeps/_workspace/src/google.golang.org/grpc/examples/route_guide/proto/route_guide.proto rename to Godeps/_workspace/src/google.golang.org/grpc/examples/route_guide/routeguide/route_guide.proto index 5ea4fcf5b63..bee7ac51ab3 100644 --- a/Godeps/_workspace/src/google.golang.org/grpc/examples/route_guide/proto/route_guide.proto +++ b/Godeps/_workspace/src/google.golang.org/grpc/examples/route_guide/routeguide/route_guide.proto @@ -29,7 +29,7 @@ syntax = "proto3"; -package proto; +package routeguide; // Interface exported by the server. 
service RouteGuide { diff --git a/Godeps/_workspace/src/google.golang.org/grpc/examples/route_guide/server/server.go b/Godeps/_workspace/src/google.golang.org/grpc/examples/route_guide/server/server.go index c33234e2b4e..09b3942d191 100644 --- a/Godeps/_workspace/src/google.golang.org/grpc/examples/route_guide/server/server.go +++ b/Godeps/_workspace/src/google.golang.org/grpc/examples/route_guide/server/server.go @@ -53,9 +53,9 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" - proto "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/proto" - pb "google.golang.org/grpc/examples/route_guide/proto" + pb "google.golang.org/grpc/examples/route_guide/routeguide" ) var ( diff --git a/Godeps/_workspace/src/google.golang.org/grpc/grpc-auth-support.md b/Godeps/_workspace/src/google.golang.org/grpc/grpc-auth-support.md index 36fe0bd0d33..f80cbbdb273 100644 --- a/Godeps/_workspace/src/google.golang.org/grpc/grpc-auth-support.md +++ b/Godeps/_workspace/src/google.golang.org/grpc/grpc-auth-support.md @@ -1,6 +1,6 @@ # Authentication -As outlined here gRPC supports a number of different mechanisms for asserting identity between an client and server. We'll present some code-samples here demonstrating how to provide TLS support encryption and identity assertions as well as passing OAuth2 tokens to services that support it. +As outlined here gRPC supports a number of different mechanisms for asserting identity between an client and server. We'll present some code-samples here demonstrating how to provide TLS support encryption and identity assertions as well as passing OAuth2 tokens to services that support it. 
 # Enabling TLS on a gRPC client
 
@@ -26,13 +26,13 @@ server.Serve(lis)
 ## Google Compute Engine (GCE)
 
 ```Go
-conn, err := grpc.Dial(serverAddr, grpc.WithTransportCredentials(credentials.NewClientTLSFromCert(nil, ""), grpc.WithPerRPCCredentials(credentials.NewComputeEngine())))
+conn, err := grpc.Dial(serverAddr, grpc.WithTransportCredentials(credentials.NewClientTLSFromCert(nil, "")), grpc.WithPerRPCCredentials(oauth.NewComputeEngine()))
 ```
 
 ## JWT
 
 ```Go
-jwtCreds, err := credentials.NewServiceAccountFromFile(*serviceAccountKeyFile, *oauthScope)
+jwtCreds, err := oauth.NewServiceAccountFromFile(*serviceAccountKeyFile, *oauthScope)
 if err != nil {
   log.Fatalf("Failed to create JWT credentials: %v", err)
 }
diff --git a/Godeps/_workspace/src/google.golang.org/grpc/grpclog/glogger/glogger.go b/Godeps/_workspace/src/google.golang.org/grpc/grpclog/glogger/glogger.go
new file mode 100644
index 00000000000..53e3c539fb0
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/grpc/grpclog/glogger/glogger.go
@@ -0,0 +1,72 @@
+/*
+ *
+ * Copyright 2015, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +/* +Package glogger defines glog-based logging for grpc. +*/ +package glogger + +import ( + "github.com/golang/glog" + "google.golang.org/grpc/grpclog" +) + +func init() { + grpclog.SetLogger(&glogger{}) +} + +type glogger struct{} + +func (g *glogger) Fatal(args ...interface{}) { + glog.Fatal(args...) +} + +func (g *glogger) Fatalf(format string, args ...interface{}) { + glog.Fatalf(format, args...) +} + +func (g *glogger) Fatalln(args ...interface{}) { + glog.Fatalln(args...) +} + +func (g *glogger) Print(args ...interface{}) { + glog.Info(args...) +} + +func (g *glogger) Printf(format string, args ...interface{}) { + glog.Infof(format, args...) +} + +func (g *glogger) Println(args ...interface{}) { + glog.Infoln(args...) +} diff --git a/Godeps/_workspace/src/google.golang.org/grpc/grpclog/logger.go b/Godeps/_workspace/src/google.golang.org/grpc/grpclog/logger.go index 882c0f6cd7a..cc6e27c0649 100644 --- a/Godeps/_workspace/src/google.golang.org/grpc/grpclog/logger.go +++ b/Godeps/_workspace/src/google.golang.org/grpc/grpclog/logger.go @@ -32,26 +32,17 @@ */ /* -Package log defines logging for grpc. +Package grpclog defines logging for grpc. 
*/ package grpclog import ( "log" "os" - - "github.com/golang/glog" ) -var ( - // GLogger is a Logger that uses glog. This is the default logger. - GLogger Logger = &glogger{} - - // StdLogger is a Logger that uses golang's standard logger. - StdLogger Logger = log.New(os.Stderr, "", log.LstdFlags) - - logger = GLogger -) +// Use golang's standard logger by default. +var logger Logger = log.New(os.Stderr, "", log.LstdFlags) // Logger mimics golang's standard Logger as an interface. type Logger interface { @@ -73,12 +64,12 @@ func Fatal(args ...interface{}) { logger.Fatal(args...) } -// Fatal is equivalent to Printf() followed by a call to os.Exit() with a non-zero exit code. +// Fatalf is equivalent to Printf() followed by a call to os.Exit() with a non-zero exit code. func Fatalf(format string, args ...interface{}) { logger.Fatalf(format, args...) } -// Fatal is equivalent to Println() followed by a call to os.Exit()) with a non-zero exit code. +// Fatalln is equivalent to Println() followed by a call to os.Exit()) with a non-zero exit code. func Fatalln(args ...interface{}) { logger.Fatalln(args...) } @@ -97,29 +88,3 @@ func Printf(format string, args ...interface{}) { func Println(args ...interface{}) { logger.Println(args...) } - -type glogger struct{} - -func (g *glogger) Fatal(args ...interface{}) { - glog.Fatal(args...) -} - -func (g *glogger) Fatalf(format string, args ...interface{}) { - glog.Fatalf(format, args...) -} - -func (g *glogger) Fatalln(args ...interface{}) { - glog.Fatalln(args...) -} - -func (g *glogger) Print(args ...interface{}) { - glog.Info(args...) -} - -func (g *glogger) Printf(format string, args ...interface{}) { - glog.Infof(format, args...) -} - -func (g *glogger) Println(args ...interface{}) { - glog.Infoln(args...) 
-} diff --git a/Godeps/_workspace/src/google.golang.org/grpc/health/grpc_health_v1alpha/health.pb.go b/Godeps/_workspace/src/google.golang.org/grpc/health/grpc_health_v1alpha/health.pb.go new file mode 100644 index 00000000000..6bfbe49730b --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/health/grpc_health_v1alpha/health.pb.go @@ -0,0 +1,129 @@ +// Code generated by protoc-gen-go. +// source: health.proto +// DO NOT EDIT! + +/* +Package grpc_health_v1alpha is a generated protocol buffer package. + +It is generated from these files: + health.proto + +It has these top-level messages: + HealthCheckRequest + HealthCheckResponse +*/ +package grpc_health_v1alpha + +import proto "github.com/golang/protobuf/proto" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal + +type HealthCheckResponse_ServingStatus int32 + +const ( + HealthCheckResponse_UNKNOWN HealthCheckResponse_ServingStatus = 0 + HealthCheckResponse_SERVING HealthCheckResponse_ServingStatus = 1 + HealthCheckResponse_NOT_SERVING HealthCheckResponse_ServingStatus = 2 +) + +var HealthCheckResponse_ServingStatus_name = map[int32]string{ + 0: "UNKNOWN", + 1: "SERVING", + 2: "NOT_SERVING", +} +var HealthCheckResponse_ServingStatus_value = map[string]int32{ + "UNKNOWN": 0, + "SERVING": 1, + "NOT_SERVING": 2, +} + +func (x HealthCheckResponse_ServingStatus) String() string { + return proto.EnumName(HealthCheckResponse_ServingStatus_name, int32(x)) +} + +type HealthCheckRequest struct { + Service string `protobuf:"bytes,2,opt,name=service" json:"service,omitempty"` +} + +func (m *HealthCheckRequest) Reset() { *m = HealthCheckRequest{} } +func (m *HealthCheckRequest) String() string { return proto.CompactTextString(m) } +func (*HealthCheckRequest) ProtoMessage() {} + +type HealthCheckResponse struct { + Status HealthCheckResponse_ServingStatus 
`protobuf:"varint,1,opt,name=status,enum=grpc.health.v1alpha.HealthCheckResponse_ServingStatus" json:"status,omitempty"` +} + +func (m *HealthCheckResponse) Reset() { *m = HealthCheckResponse{} } +func (m *HealthCheckResponse) String() string { return proto.CompactTextString(m) } +func (*HealthCheckResponse) ProtoMessage() {} + +func init() { + proto.RegisterEnum("grpc.health.v1alpha.HealthCheckResponse_ServingStatus", HealthCheckResponse_ServingStatus_name, HealthCheckResponse_ServingStatus_value) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// Client API for HealthCheck service + +type HealthCheckClient interface { + Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) +} + +type healthCheckClient struct { + cc *grpc.ClientConn +} + +func NewHealthCheckClient(cc *grpc.ClientConn) HealthCheckClient { + return &healthCheckClient{cc} +} + +func (c *healthCheckClient) Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) { + out := new(HealthCheckResponse) + err := grpc.Invoke(ctx, "/grpc.health.v1alpha.HealthCheck/Check", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +// Server API for HealthCheck service + +type HealthCheckServer interface { + Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) +} + +func RegisterHealthCheckServer(s *grpc.Server, srv HealthCheckServer) { + s.RegisterService(&_HealthCheck_serviceDesc, srv) +} + +func _HealthCheck_Check_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) { + in := new(HealthCheckRequest) + if err := codec.Unmarshal(buf, in); err != nil { + return nil, err + } + out, err := srv.(HealthCheckServer).Check(ctx, in) + if err != nil { + return nil, err + } + return out, nil +} + +var _HealthCheck_serviceDesc = grpc.ServiceDesc{ + ServiceName: "grpc.health.v1alpha.HealthCheck", + HandlerType: (*HealthCheckServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Check", + Handler: _HealthCheck_Check_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, +} diff --git a/Godeps/_workspace/src/google.golang.org/grpc/health/grpc_health_v1alpha/health.proto b/Godeps/_workspace/src/google.golang.org/grpc/health/grpc_health_v1alpha/health.proto new file mode 100644 index 00000000000..1ca5bbc1693 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/health/grpc_health_v1alpha/health.proto @@ -0,0 +1,20 @@ +syntax = "proto3"; + +package grpc.health.v1alpha; + +message HealthCheckRequest { + string service = 2; +} + +message HealthCheckResponse { + enum ServingStatus { + UNKNOWN = 0; + SERVING = 1; + NOT_SERVING = 2; + } + ServingStatus status = 1; +} + +service HealthCheck{ + rpc Check(HealthCheckRequest) returns (HealthCheckResponse); +} diff --git a/Godeps/_workspace/src/google.golang.org/grpc/health/health.go b/Godeps/_workspace/src/google.golang.org/grpc/health/health.go new file mode 100644 index 00000000000..7930bde7f3d --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/health/health.go @@ -0,0 +1,49 @@ +// Package health provides some 
utility functions to health-check a server. The implementation +// is based on protobuf. Users need to write their own implementations if other IDLs are used. +package health + +import ( + "sync" + + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + healthpb "google.golang.org/grpc/health/grpc_health_v1alpha" +) + +type HealthServer struct { + mu sync.Mutex + // statusMap stores the serving status of the services this HealthServer monitors. + statusMap map[string]healthpb.HealthCheckResponse_ServingStatus +} + +func NewHealthServer() *HealthServer { + return &HealthServer{ + statusMap: make(map[string]healthpb.HealthCheckResponse_ServingStatus), + } +} + +func (s *HealthServer) Check(ctx context.Context, in *healthpb.HealthCheckRequest) (*healthpb.HealthCheckResponse, error) { + s.mu.Lock() + defer s.mu.Unlock() + if in.Service == "" { + // check the server overall health status. + return &healthpb.HealthCheckResponse{ + Status: healthpb.HealthCheckResponse_SERVING, + }, nil + } + if status, ok := s.statusMap[in.Service]; ok { + return &healthpb.HealthCheckResponse{ + Status: status, + }, nil + } + return nil, grpc.Errorf(codes.NotFound, "unknown service") +} + +// SetServingStatus is called when need to reset the serving status of a service +// or insert a new service entry into the statusMap. 
+func (s *HealthServer) SetServingStatus(service string, status healthpb.HealthCheckResponse_ServingStatus) { + s.mu.Lock() + s.statusMap[service] = status + s.mu.Unlock() +} diff --git a/Godeps/_workspace/src/google.golang.org/grpc/interop/client/client.go b/Godeps/_workspace/src/google.golang.org/grpc/interop/client/client.go index 1525c8bf13c..4f715d35ee7 100644 --- a/Godeps/_workspace/src/google.golang.org/grpc/interop/client/client.go +++ b/Godeps/_workspace/src/google.golang.org/grpc/interop/client/client.go @@ -40,12 +40,16 @@ import ( "net" "strconv" "strings" + "time" "github.com/golang/protobuf/proto" "golang.org/x/net/context" + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/oauth" "google.golang.org/grpc/grpclog" testpb "google.golang.org/grpc/interop/grpc_testing" "google.golang.org/grpc/metadata" @@ -67,10 +71,15 @@ var ( client_streaming : request streaming with single response; server_streaming : single request with response streaming; ping_pong : full-duplex streaming; + empty_stream : full-duplex streaming with zero message; + timeout_on_sleeping_server: fullduplex streaming; compute_engine_creds: large_unary with compute engine auth; - service_account_creds: large_unary with service account auth; - cancel_after_begin: cancellation after metadata has been sent but before payloads are sent; - cancel_after_first_response: cancellation after receiving 1st message from the server.`) + service_account_creds: large_unary with service account auth; + jwt_token_creds: large_unary with jwt token auth; + per_rpc_creds: large_unary with per rpc token; + oauth2_auth_token: large_unary with oauth2 token auth; + cancel_after_begin: cancellation after metadata has been sent but before payloads are sent; + cancel_after_first_response: cancellation after receiving 1st message from the server.`) ) var ( @@ -244,6 +253,44 @@ func 
doPingPong(tc testpb.TestServiceClient) { grpclog.Println("Pingpong done") } +func doEmptyStream(tc testpb.TestServiceClient) { + stream, err := tc.FullDuplexCall(context.Background()) + if err != nil { + grpclog.Fatalf("%v.FullDuplexCall(_) = _, %v", tc, err) + } + if err := stream.CloseSend(); err != nil { + grpclog.Fatalf("%v.CloseSend() got %v, want %v", stream, err, nil) + } + if _, err := stream.Recv(); err != io.EOF { + grpclog.Fatalf("%v failed to complete the empty stream test: %v", stream, err) + } + grpclog.Println("Emptystream done") +} + +func doTimeoutOnSleepingServer(tc testpb.TestServiceClient) { + ctx, _ := context.WithTimeout(context.Background(), 1*time.Millisecond) + stream, err := tc.FullDuplexCall(ctx) + if err != nil { + if grpc.Code(err) == codes.DeadlineExceeded { + grpclog.Println("TimeoutOnSleepingServer done") + return + } + grpclog.Fatalf("%v.FullDuplexCall(_) = _, %v", tc, err) + } + pl := newPayload(testpb.PayloadType_COMPRESSABLE, 27182) + req := &testpb.StreamingOutputCallRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + Payload: pl, + } + if err := stream.Send(req); err != nil { + grpclog.Fatalf("%v.Send(%v) = %v", stream, req, err) + } + if _, err := stream.Recv(); grpc.Code(err) != codes.DeadlineExceeded { + grpclog.Fatalf("%v.Recv() = _, %v, want error code %d", stream, err, codes.DeadlineExceeded) + } + grpclog.Println("TimeoutOnSleepingServer done") +} + func doComputeEngineCreds(tc testpb.TestServiceClient) { pl := newPayload(testpb.PayloadType_COMPRESSABLE, largeReqSize) req := &testpb.SimpleRequest{ @@ -301,10 +348,96 @@ func doServiceAccountCreds(tc testpb.TestServiceClient) { grpclog.Println("ServiceAccountCreds done") } +func doJWTTokenCreds(tc testpb.TestServiceClient) { + pl := newPayload(testpb.PayloadType_COMPRESSABLE, largeReqSize) + req := &testpb.SimpleRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + ResponseSize: proto.Int32(int32(largeRespSize)), + Payload: pl, + FillUsername: 
proto.Bool(true), + } + reply, err := tc.UnaryCall(context.Background(), req) + if err != nil { + grpclog.Fatal("/TestService/UnaryCall RPC failed: ", err) + } + jsonKey := getServiceAccountJSONKey() + user := reply.GetUsername() + if !strings.Contains(string(jsonKey), user) { + grpclog.Fatalf("Got user name %q which is NOT a substring of %q.", user, jsonKey) + } + grpclog.Println("JWTtokenCreds done") +} + +func getToken() *oauth2.Token { + jsonKey := getServiceAccountJSONKey() + config, err := google.JWTConfigFromJSON(jsonKey, *oauthScope) + if err != nil { + grpclog.Fatalf("Failed to get the config: %v", err) + } + token, err := config.TokenSource(context.Background()).Token() + if err != nil { + grpclog.Fatalf("Failed to get the token: %v", err) + } + return token +} + +func doOauth2TokenCreds(tc testpb.TestServiceClient) { + pl := newPayload(testpb.PayloadType_COMPRESSABLE, largeReqSize) + req := &testpb.SimpleRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + ResponseSize: proto.Int32(int32(largeRespSize)), + Payload: pl, + FillUsername: proto.Bool(true), + FillOauthScope: proto.Bool(true), + } + reply, err := tc.UnaryCall(context.Background(), req) + if err != nil { + grpclog.Fatal("/TestService/UnaryCall RPC failed: ", err) + } + jsonKey := getServiceAccountJSONKey() + user := reply.GetUsername() + scope := reply.GetOauthScope() + if !strings.Contains(string(jsonKey), user) { + grpclog.Fatalf("Got user name %q which is NOT a substring of %q.", user, jsonKey) + } + if !strings.Contains(*oauthScope, scope) { + grpclog.Fatalf("Got OAuth scope %q which is NOT a substring of %q.", scope, *oauthScope) + } + grpclog.Println("Oauth2TokenCreds done") +} + +func doPerRPCCreds(tc testpb.TestServiceClient) { + jsonKey := getServiceAccountJSONKey() + pl := newPayload(testpb.PayloadType_COMPRESSABLE, largeReqSize) + req := &testpb.SimpleRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + ResponseSize: proto.Int32(int32(largeRespSize)), + 
Payload: pl, + FillUsername: proto.Bool(true), + FillOauthScope: proto.Bool(true), + } + token := getToken() + kv := map[string]string{"authorization": token.TokenType + " " + token.AccessToken} + ctx := metadata.NewContext(context.Background(), metadata.MD{"authorization": []string{kv["authorization"]}}) + reply, err := tc.UnaryCall(ctx, req) + if err != nil { + grpclog.Fatal("/TestService/UnaryCall RPC failed: ", err) + } + user := reply.GetUsername() + scope := reply.GetOauthScope() + if !strings.Contains(string(jsonKey), user) { + grpclog.Fatalf("Got user name %q which is NOT a substring of %q.", user, jsonKey) + } + if !strings.Contains(*oauthScope, scope) { + grpclog.Fatalf("Got OAuth scope %q which is NOT a substring of %q.", scope, *oauthScope) + } + grpclog.Println("PerRPCCreds done") +} + var ( testMetadata = metadata.MD{ - "key1": "value1", - "key2": "value2", + "key1": []string{"value1"}, + "key2": []string{"value2"}, } ) @@ -373,14 +506,24 @@ func main() { } opts = append(opts, grpc.WithTransportCredentials(creds)) if *testCase == "compute_engine_creds" { - opts = append(opts, grpc.WithPerRPCCredentials(credentials.NewComputeEngine())) + opts = append(opts, grpc.WithPerRPCCredentials(oauth.NewComputeEngine())) } else if *testCase == "service_account_creds" { - jwtCreds, err := credentials.NewServiceAccountFromFile(*serviceAccountKeyFile, *oauthScope) + jwtCreds, err := oauth.NewServiceAccountFromFile(*serviceAccountKeyFile, *oauthScope) if err != nil { grpclog.Fatalf("Failed to create JWT credentials: %v", err) } opts = append(opts, grpc.WithPerRPCCredentials(jwtCreds)) + } else if *testCase == "jwt_token_creds" { + jwtCreds, err := oauth.NewJWTAccessFromFile(*serviceAccountKeyFile) + if err != nil { + grpclog.Fatalf("Failed to create JWT credentials: %v", err) + } + opts = append(opts, grpc.WithPerRPCCredentials(jwtCreds)) + } else if *testCase == "oauth2_auth_token" { + opts = append(opts, grpc.WithPerRPCCredentials(oauth.NewOauthAccess(getToken()))) 
} + } else { + opts = append(opts, grpc.WithInsecure()) } conn, err := grpc.Dial(serverAddr, opts...) if err != nil { @@ -399,6 +542,10 @@ func main() { doServerStreaming(tc) case "ping_pong": doPingPong(tc) + case "empty_stream": + doEmptyStream(tc) + case "timeout_on_sleeping_server": + doTimeoutOnSleepingServer(tc) case "compute_engine_creds": if !*useTLS { grpclog.Fatalf("TLS is not enabled. TLS is required to execute compute_engine_creds test case.") @@ -409,6 +556,21 @@ func main() { grpclog.Fatalf("TLS is not enabled. TLS is required to execute service_account_creds test case.") } doServiceAccountCreds(tc) + case "jwt_token_creds": + if !*useTLS { + grpclog.Fatalf("TLS is not enabled. TLS is required to execute jwt_token_creds test case.") + } + doJWTTokenCreds(tc) + case "per_rpc_creds": + if !*useTLS { + grpclog.Fatalf("TLS is not enabled. TLS is required to execute per_rpc_creds test case.") + } + doPerRPCCreds(tc) + case "oauth2_auth_token": + if !*useTLS { + grpclog.Fatalf("TLS is not enabled. TLS is required to execute oauth2_auth_token test case.") + } + doOauth2TokenCreds(tc) case "cancel_after_begin": doCancelAfterBegin(tc) case "cancel_after_first_response": diff --git a/Godeps/_workspace/src/google.golang.org/grpc/interop/grpc_testing/test.pb.go b/Godeps/_workspace/src/google.golang.org/grpc/interop/grpc_testing/test.pb.go index 261ba1cc277..b25e98b8e43 100644 --- a/Godeps/_workspace/src/google.golang.org/grpc/interop/grpc_testing/test.pb.go +++ b/Godeps/_workspace/src/google.golang.org/grpc/interop/grpc_testing/test.pb.go @@ -1,12 +1,12 @@ // Code generated by protoc-gen-go. -// source: src/google.golang.org/grpc/test/grpc_testing/test.proto +// source: test.proto // DO NOT EDIT! /* Package grpc_testing is a generated protocol buffer package. 
It is generated from these files: - src/google.golang.org/grpc/test/grpc_testing/test.proto + test.proto It has these top-level messages: Empty diff --git a/Godeps/_workspace/src/google.golang.org/grpc/metadata/metadata.go b/Godeps/_workspace/src/google.golang.org/grpc/metadata/metadata.go index aa12b1d5736..5f26abaeacb 100644 --- a/Godeps/_workspace/src/google.golang.org/grpc/metadata/metadata.go +++ b/Godeps/_workspace/src/google.golang.org/grpc/metadata/metadata.go @@ -64,7 +64,7 @@ func encodeKeyValue(k, v string) (string, string) { if isASCII(v) { return k, v } - key := k + binHdrSuffix + key := strings.ToLower(k + binHdrSuffix) val := base64.StdEncoding.EncodeToString([]byte(v)) return key, string(val) } @@ -85,14 +85,14 @@ func DecodeKeyValue(k, v string) (string, string, error) { // MD is a mapping from metadata keys to values. Users should use the following // two convenience functions New and Pairs to generate MD. -type MD map[string]string +type MD map[string][]string // New creates a MD from given key-value map. 
func New(m map[string]string) MD { md := MD{} for k, v := range m { key, val := encodeKeyValue(k, v) - md[key] = val + md[key] = append(md[key], val) } return md } @@ -111,7 +111,7 @@ func Pairs(kv ...string) MD { continue } key, val := encodeKeyValue(k, s) - md[key] = val + md[key] = append(md[key], val) } return md } @@ -125,7 +125,9 @@ func (md MD) Len() int { func (md MD) Copy() MD { out := MD{} for k, v := range md { - out[k] = v + for _, i := range v { + out[k] = append(out[k], i) + } } return out } diff --git a/Godeps/_workspace/src/google.golang.org/grpc/naming/etcd/etcd.go b/Godeps/_workspace/src/google.golang.org/grpc/naming/etcd/etcd.go new file mode 100644 index 00000000000..915e22717e8 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/naming/etcd/etcd.go @@ -0,0 +1,145 @@ +package etcd + +import ( + "log" + "sync" + + etcdcl "github.com/coreos/etcd/client" + "golang.org/x/net/context" + "google.golang.org/grpc/naming" +) + +type kv struct { + key, value string +} + +// recvBuffer is an unbounded channel of *kv to record all the pending changes from etcd server. +type recvBuffer struct { + c chan *kv + mu sync.Mutex + stopping bool + backlog []*kv +} + +func newRecvBuffer() *recvBuffer { + b := &recvBuffer{ + c: make(chan *kv, 1), + } + return b +} + +func (b *recvBuffer) put(r *kv) { + b.mu.Lock() + defer b.mu.Unlock() + if b.stopping { + return + } + b.backlog = append(b.backlog, r) + select { + case b.c <- b.backlog[0]: + b.backlog = b.backlog[1:] + default: + } +} + +func (b *recvBuffer) load() { + b.mu.Lock() + defer b.mu.Unlock() + if b.stopping || len(b.backlog) == 0 { + return + } + select { + case b.c <- b.backlog[0]: + b.backlog = b.backlog[1:] + default: + } +} + +func (b *recvBuffer) get() <-chan *kv { + return b.c +} + +// stop terminates the recvBuffer. After it is called, the recvBuffer is not usable any more. 
+func (b *recvBuffer) stop() { + b.mu.Lock() + b.stopping = true + close(b.c) + b.mu.Unlock() +} + +type etcdNR struct { + kAPI etcdcl.KeysAPI + recv *recvBuffer + ctx context.Context + cancel context.CancelFunc +} + +// NewETCDNR creates an etcd NameResolver. +func NewETCDNR(cfg etcdcl.Config) (naming.Resolver, error) { + c, err := etcdcl.New(cfg) + if err != nil { + return nil, err + } + kAPI := etcdcl.NewKeysAPI(c) + ctx, cancel := context.WithCancel(context.Background()) + return &etcdNR{ + kAPI: kAPI, + recv: newRecvBuffer(), + ctx: ctx, + cancel: cancel, + }, nil +} + +// getNode builds the resulting key-value map starting from node recursively. +func getNode(node *etcdcl.Node, res map[string]string) { + if !node.Dir { + res[node.Key] = node.Value + return + } + for _, val := range node.Nodes { + getNode(val, res) + } +} + +func (nr *etcdNR) Get(target string) map[string]string { + resp, err := nr.kAPI.Get(nr.ctx, target, &etcdcl.GetOptions{Recursive: true, Sort: true}) + if err != nil { + log.Printf("etcdNR.Get(_) stopped: %v", err) + return nil + } + res := make(map[string]string) + getNode(resp.Node, res) + return res +} + +func (nr *etcdNR) Watch(target string) { + watcher := nr.kAPI.Watcher(target, &etcdcl.WatcherOptions{Recursive: true}) + for { + resp, err := watcher.Next(nr.ctx) + if err != nil { + log.Printf("etcdNR.Watch(_) stopped: %v", err) + break + } + if resp.Node.Dir { + continue + } + entry := &kv{key: resp.Node.Key, value: resp.Node.Value} + nr.recv.put(entry) + } +} + +func (nr *etcdNR) GetUpdate() (string, string) { + i := <-nr.recv.get() + nr.recv.load() + if i == nil { + return "", "" + } + // returns key and the corresponding value of the updated kv pair + return i.key, i.value + +} + +func (nr *etcdNR) Stop() { + nr.recv.stop() + nr.cancel() +} diff --git a/Godeps/_workspace/src/google.golang.org/grpc/naming/naming.go b/Godeps/_workspace/src/google.golang.org/grpc/naming/naming.go new file mode 100644 index 00000000000..a6a319f7507 --- 
/dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/naming/naming.go @@ -0,0 +1,13 @@ +package naming + +// Resolver does name resolution and watches for the resolution changes. +type Resolver interface { + // Get gets a snapshot of the current name resolution results for target. + Get(target string) map[string]string + // Watch watches for the name resolution changes on target. It blocks until Stop() is invoked. The watch results are obtained via GetUpdate(). + Watch(target string) + // GetUpdate returns a name resolution change when watch is triggered. It blocks until it observes a change. The caller needs to call it again to get the next change. + GetUpdate() (string, string) + // Stop shuts down the NameResolver. + Stop() +} diff --git a/Godeps/_workspace/src/google.golang.org/grpc/rpc_util.go b/Godeps/_workspace/src/google.golang.org/grpc/rpc_util.go index 49512603bbc..a0f0b48bb44 100644 --- a/Godeps/_workspace/src/google.golang.org/grpc/rpc_util.go +++ b/Godeps/_workspace/src/google.golang.org/grpc/rpc_util.go @@ -277,28 +277,29 @@ func convertCode(err error) codes.Code { const ( // how long to wait after the first failure before retrying baseDelay = 1.0 * time.Second - // upper bound on backoff delay - maxDelay = 120 * time.Second - backoffFactor = 2.0 // backoff increases by this factor on each retry - backoffRange = 0.4 // backoff is randomized downwards by this factor + // upper bound of backoff delay + maxDelay = 120 * time.Second + // backoff increases by this factor on each retry + backoffFactor = 1.6 + // backoff is randomized downwards by this factor + backoffJitter = 0.2 ) -// backoff returns a value in [0, maxDelay] that increases exponentially with -// retries, starting from baseDelay. 
-func backoff(retries int) time.Duration { +func backoff(retries int) (t time.Duration) { + if retries == 0 { + return baseDelay + } backoff, max := float64(baseDelay), float64(maxDelay) for backoff < max && retries > 0 { - backoff = backoff * backoffFactor + backoff *= backoffFactor retries-- } if backoff > max { backoff = max } - // Randomize backoff delays so that if a cluster of requests start at - // the same time, they won't operate in lockstep. We just subtract up - // to 40% so that we obey maxDelay. - backoff -= backoff * backoffRange * rand.Float64() + // the same time, they won't operate in lockstep. + backoff *= 1 + backoffJitter*(rand.Float64()*2-1) if backoff < 0 { return 0 } diff --git a/Godeps/_workspace/src/google.golang.org/grpc/server.go b/Godeps/_workspace/src/google.golang.org/grpc/server.go index 53559de051c..feb29887513 100644 --- a/Godeps/_workspace/src/google.golang.org/grpc/server.go +++ b/Godeps/_workspace/src/google.golang.org/grpc/server.go @@ -43,6 +43,7 @@ import ( "sync" "golang.org/x/net/context" + "golang.org/x/net/trace" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" @@ -139,17 +140,20 @@ func NewServer(opt ...ServerOption) *Server { // server. Called from the IDL generated code. This must be called before // invoking Serve. func (s *Server) RegisterService(sd *ServiceDesc, ss interface{}) { - s.mu.Lock() - defer s.mu.Unlock() - // Does some sanity checks. 
- if _, ok := s.m[sd.ServiceName]; ok { - grpclog.Fatalf("grpc: Server.RegisterService found duplicate service registration for %q", sd.ServiceName) - } ht := reflect.TypeOf(sd.HandlerType).Elem() st := reflect.TypeOf(ss) if !st.Implements(ht) { grpclog.Fatalf("grpc: Server.RegisterService found the handler of type %v that does not satisfy %v", st, ht) } + s.register(sd, ss) +} + +func (s *Server) register(sd *ServiceDesc, ss interface{}) { + s.mu.Lock() + defer s.mu.Unlock() + if _, ok := s.m[sd.ServiceName]; ok { + grpclog.Fatalf("grpc: Server.RegisterService found duplicate service registration for %q", sd.ServiceName) + } srv := &service{ server: ss, md: make(map[string]*MethodDesc), @@ -195,8 +199,9 @@ func (s *Server) Serve(lis net.Listener) error { if err != nil { return err } + var authInfo credentials.AuthInfo if creds, ok := s.opts.creds.(credentials.TransportAuthenticator); ok { - c, err = creds.ServerHandshake(c) + c, authInfo, err = creds.ServerHandshake(c) if err != nil { grpclog.Println("grpc: Server.Serve failed to complete security handshake.") continue @@ -208,7 +213,7 @@ func (s *Server) Serve(lis net.Listener) error { c.Close() return nil } - st, err := transport.NewServerTransport("http2", c, s.opts.maxConcurrentStreams) + st, err := transport.NewServerTransport("http2", c, s.opts.maxConcurrentStreams, authInfo) if err != nil { s.mu.Unlock() c.Close() @@ -244,13 +249,26 @@ func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Str return t.Write(stream, p, opts) } -func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, md *MethodDesc) { +func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, md *MethodDesc) (err error) { + var traceInfo traceInfo + if EnableTracing { + traceInfo.tr = trace.New("grpc.Recv."+methodFamily(stream.Method()), stream.Method()) + defer traceInfo.tr.Finish() + traceInfo.firstLine.client = false + 
traceInfo.tr.LazyLog(&traceInfo.firstLine, false) + defer func() { + if err != nil && err != io.EOF { + traceInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + traceInfo.tr.SetError() + } + }() + } p := &parser{s: stream} for { pf, req, err := p.recvMsg() if err == io.EOF { // The entire stream is done (for unary RPC only). - return + return err } if err != nil { switch err := err.(type) { @@ -263,7 +281,10 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. default: panic(fmt.Sprintf("grpc: Unexpected error (%T) from recvMsg: %v", err, err)) } - return + return err + } + if traceInfo.tr != nil { + traceInfo.tr.LazyLog(&payload{sent: false, msg: req}, true) } switch pf { case compressionNone: @@ -280,38 +301,59 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. } if err := t.WriteStatus(stream, statusCode, statusDesc); err != nil { grpclog.Printf("grpc: Server.processUnaryRPC failed to write status: %v", err) + return err } - return + return nil } opts := &transport.Options{ Last: true, Delay: false, } if err := s.sendResponse(t, stream, reply, compressionNone, opts); err != nil { - if _, ok := err.(transport.ConnectionError); ok { - return - } - if e, ok := err.(transport.StreamError); ok { - statusCode = e.Code - statusDesc = e.Desc - } else { + switch err := err.(type) { + case transport.ConnectionError: + // Nothing to do here. 
+ case transport.StreamError: + statusCode = err.Code + statusDesc = err.Desc + default: statusCode = codes.Unknown statusDesc = err.Error() } + return err } - t.WriteStatus(stream, statusCode, statusDesc) + if traceInfo.tr != nil { + traceInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true) + } + return t.WriteStatus(stream, statusCode, statusDesc) default: panic(fmt.Sprintf("payload format to be supported: %d", pf)) } } } -func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, sd *StreamDesc) { +func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, sd *StreamDesc) (err error) { ss := &serverStream{ - t: t, - s: stream, - p: &parser{s: stream}, - codec: s.opts.codec, + t: t, + s: stream, + p: &parser{s: stream}, + codec: s.opts.codec, + tracing: EnableTracing, + } + if ss.tracing { + ss.traceInfo.tr = trace.New("grpc.Recv."+methodFamily(stream.Method()), stream.Method()) + ss.traceInfo.firstLine.client = false + ss.traceInfo.tr.LazyLog(&ss.traceInfo.firstLine, false) + defer func() { + ss.mu.Lock() + if err != nil && err != io.EOF { + ss.traceInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + ss.traceInfo.tr.SetError() + } + ss.traceInfo.tr.Finish() + ss.traceInfo.tr = nil + ss.mu.Unlock() + }() } if appErr := sd.Handler(srv.server, ss); appErr != nil { if err, ok := appErr.(rpcError); ok { @@ -322,7 +364,8 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp ss.statusDesc = appErr.Error() } } - t.WriteStatus(ss.s, ss.statusCode, ss.statusDesc) + return t.WriteStatus(ss.s, ss.statusCode, ss.statusDesc) + } func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream) { diff --git a/Godeps/_workspace/src/google.golang.org/grpc/stream.go b/Godeps/_workspace/src/google.golang.org/grpc/stream.go index 43fdcbecb53..5c99bffc638 100644 --- a/Godeps/_workspace/src/google.golang.org/grpc/stream.go +++ 
b/Godeps/_workspace/src/google.golang.org/grpc/stream.go @@ -36,8 +36,11 @@ package grpc import ( "errors" "io" + "sync" + "time" "golang.org/x/net/context" + "golang.org/x/net/trace" "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" "google.golang.org/grpc/transport" @@ -98,6 +101,19 @@ func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth Host: cc.authority, Method: method, } + cs := &clientStream{ + desc: desc, + codec: cc.dopts.codec, + tracing: EnableTracing, + } + if cs.tracing { + cs.traceInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method) + cs.traceInfo.firstLine.client = true + if deadline, ok := ctx.Deadline(); ok { + cs.traceInfo.firstLine.deadline = deadline.Sub(time.Now()) + } + cs.traceInfo.tr.LazyLog(&cs.traceInfo.firstLine, false) + } t, _, err := cc.wait(ctx, 0) if err != nil { return nil, toRPCErr(err) @@ -106,13 +122,10 @@ func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth if err != nil { return nil, toRPCErr(err) } - return &clientStream{ - t: t, - s: s, - p: &parser{s: s}, - desc: desc, - codec: cc.dopts.codec, - }, nil + cs.t = t + cs.s = s + cs.p = &parser{s: s} + return cs, nil } // clientStream implements a client side Stream. @@ -122,6 +135,13 @@ type clientStream struct { p *parser desc *StreamDesc codec Codec + + tracing bool // set to EnableTracing when the clientStream is created. + + mu sync.Mutex // protects traceInfo + // traceInfo.tr is set when the clientStream is created (if EnableTracing is true), + // and is set to nil when the clientStream's finish method is called. 
+ traceInfo traceInfo } func (cs *clientStream) Context() context.Context { @@ -143,6 +163,13 @@ func (cs *clientStream) Trailer() metadata.MD { } func (cs *clientStream) SendMsg(m interface{}) (err error) { + if cs.tracing { + cs.mu.Lock() + if cs.traceInfo.tr != nil { + cs.traceInfo.tr.LazyLog(&payload{sent: true, msg: m}, true) + } + cs.mu.Unlock() + } defer func() { if err == nil || err == io.EOF { return @@ -161,7 +188,20 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) { func (cs *clientStream) RecvMsg(m interface{}) (err error) { err = recv(cs.p, cs.codec, m) + defer func() { + // err != nil indicates the termination of the stream. + if err != nil { + cs.finish(err) + } + }() if err == nil { + if cs.tracing { + cs.mu.Lock() + if cs.traceInfo.tr != nil { + cs.traceInfo.tr.LazyLog(&payload{sent: false, msg: m}, true) + } + cs.mu.Unlock() + } if !cs.desc.ClientStreams || cs.desc.ServerStreams { return } @@ -204,6 +244,24 @@ func (cs *clientStream) CloseSend() (err error) { return } +func (cs *clientStream) finish(err error) { + if !cs.tracing { + return + } + cs.mu.Lock() + defer cs.mu.Unlock() + if cs.traceInfo.tr != nil { + if err == nil || err == io.EOF { + cs.traceInfo.tr.LazyPrintf("RPC: [OK]") + } else { + cs.traceInfo.tr.LazyPrintf("RPC: [%v]", err) + cs.traceInfo.tr.SetError() + } + cs.traceInfo.tr.Finish() + cs.traceInfo.tr = nil + } +} + // ServerStream defines the interface a server stream has to satisfy. type ServerStream interface { // SendHeader sends the header metadata. It should not be called @@ -224,6 +282,13 @@ type serverStream struct { codec Codec statusCode codes.Code statusDesc string + + tracing bool // set to EnableTracing when the serverStream is created. + + mu sync.Mutex // protects traceInfo + // traceInfo.tr is set when the serverStream is created (if EnableTracing is true), + // and is set to nil when the serverStream's finish method is called. 
+ traceInfo traceInfo } func (ss *serverStream) Context() context.Context { @@ -242,7 +307,20 @@ func (ss *serverStream) SetTrailer(md metadata.MD) { return } -func (ss *serverStream) SendMsg(m interface{}) error { +func (ss *serverStream) SendMsg(m interface{}) (err error) { + defer func() { + if ss.tracing { + ss.mu.Lock() + if err == nil { + ss.traceInfo.tr.LazyLog(&payload{sent: true, msg: m}, true) + } else { + ss.traceInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + ss.traceInfo.tr.SetError() + } + + ss.mu.Unlock() + } + }() out, err := encode(ss.codec, m, compressionNone) if err != nil { err = transport.StreamErrorf(codes.Internal, "grpc: %v", err) @@ -251,6 +329,18 @@ func (ss *serverStream) SendMsg(m interface{}) error { return ss.t.Write(ss.s, out, &transport.Options{Last: false}) } -func (ss *serverStream) RecvMsg(m interface{}) error { +func (ss *serverStream) RecvMsg(m interface{}) (err error) { + defer func() { + if ss.tracing { + ss.mu.Lock() + if err == nil { + ss.traceInfo.tr.LazyLog(&payload{sent: false, msg: m}, true) + } else if err != io.EOF { + ss.traceInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + ss.traceInfo.tr.SetError() + } + ss.mu.Unlock() + } + }() return recv(ss.p, ss.codec, m) } diff --git a/Godeps/_workspace/src/google.golang.org/grpc/test/grpc_testing/test.pb.go b/Godeps/_workspace/src/google.golang.org/grpc/test/grpc_testing/test.pb.go index 261ba1cc277..b25e98b8e43 100644 --- a/Godeps/_workspace/src/google.golang.org/grpc/test/grpc_testing/test.pb.go +++ b/Godeps/_workspace/src/google.golang.org/grpc/test/grpc_testing/test.pb.go @@ -1,12 +1,12 @@ // Code generated by protoc-gen-go. -// source: src/google.golang.org/grpc/test/grpc_testing/test.proto +// source: test.proto // DO NOT EDIT! /* Package grpc_testing is a generated protocol buffer package. 
It is generated from these files: - src/google.golang.org/grpc/test/grpc_testing/test.proto + test.proto It has these top-level messages: Empty diff --git a/Godeps/_workspace/src/google.golang.org/grpc/trace.go b/Godeps/_workspace/src/google.golang.org/grpc/trace.go new file mode 100644 index 00000000000..246357406ad --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/trace.go @@ -0,0 +1,116 @@ +/* + * + * Copyright 2015, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ + +package grpc + +import ( + "bytes" + "fmt" + "io" + "net" + "strings" + "time" + + "golang.org/x/net/trace" +) + +// EnableTracing controls whether to trace RPCs using the golang.org/x/net/trace package. +// This should only be set before any RPCs are sent or received by this program. +var EnableTracing = true + +// methodFamily returns the trace family for the given method. +// It turns "/pkg.Service/GetFoo" into "pkg.Service". +func methodFamily(m string) string { + m = strings.TrimPrefix(m, "/") // remove leading slash + if i := strings.Index(m, "/"); i >= 0 { + m = m[:i] // remove everything from second slash + } + if i := strings.LastIndex(m, "."); i >= 0 { + m = m[i+1:] // cut down to last dotted component + } + return m +} + +// traceInfo contains tracing information for an RPC. +type traceInfo struct { + tr trace.Trace + firstLine firstLine +} + +// firstLine is the first line of an RPC trace. +type firstLine struct { + client bool // whether this is a client (outgoing) RPC + remoteAddr net.Addr + deadline time.Duration // may be zero +} + +func (f *firstLine) String() string { + var line bytes.Buffer + io.WriteString(&line, "RPC: ") + if f.client { + io.WriteString(&line, "to") + } else { + io.WriteString(&line, "from") + } + fmt.Fprintf(&line, " %v deadline:", f.remoteAddr) + if f.deadline != 0 { + fmt.Fprint(&line, f.deadline) + } else { + io.WriteString(&line, "none") + } + return line.String() +} + +// payload represents an RPC request or response payload. +type payload struct { + sent bool // whether this is an outgoing payload + msg interface{} // e.g. a proto.Message + // TODO(dsymonds): add stringifying info to codec, and limit how much we hold here? 
+} + +func (p payload) String() string { + if p.sent { + return fmt.Sprintf("sent: %v", p.msg) + } else { + return fmt.Sprintf("recv: %v", p.msg) + } +} + +type fmtStringer struct { + format string + a []interface{} +} + +func (f *fmtStringer) String() string { + return fmt.Sprintf(f.format, f.a...) +} diff --git a/Godeps/_workspace/src/google.golang.org/grpc/transport/control.go b/Godeps/_workspace/src/google.golang.org/grpc/transport/control.go index 306b5851828..464bf10a360 100644 --- a/Godeps/_workspace/src/google.golang.org/grpc/transport/control.go +++ b/Godeps/_workspace/src/google.golang.org/grpc/transport/control.go @@ -61,8 +61,8 @@ func (windowUpdate) isItem() bool { } type settings struct { - ack bool - setting []http2.Setting + ack bool + ss []http2.Setting } func (settings) isItem() bool { @@ -104,8 +104,14 @@ type quotaPool struct { // newQuotaPool creates a quotaPool which has quota q available to consume. func newQuotaPool(q int) *quotaPool { - qb := "aPool{c: make(chan int, 1)} - qb.c <- q + qb := "aPool{ + c: make(chan int, 1), + } + if q > 0 { + qb.c <- q + } else { + qb.quota = q + } return qb } diff --git a/Godeps/_workspace/src/google.golang.org/grpc/transport/http2_client.go b/Godeps/_workspace/src/google.golang.org/grpc/transport/http2_client.go index 6ba93448908..40b76408217 100644 --- a/Godeps/_workspace/src/google.golang.org/grpc/transport/http2_client.go +++ b/Godeps/_workspace/src/google.golang.org/grpc/transport/http2_client.go @@ -39,6 +39,7 @@ import ( "io" "math" "net" + "strings" "sync" "time" @@ -53,9 +54,11 @@ import ( // http2Client implements the ClientTransport interface with HTTP2. 
type http2Client struct { - target string // server name/addr - conn net.Conn // underlying communication channel - nextID uint32 // the next stream ID to be used + target string // server name/addr + userAgent string + conn net.Conn // underlying communication channel + authInfo credentials.AuthInfo // auth info about the connection + nextID uint32 // the next stream ID to be used // writableChan synchronizes write access to the transport. // A writer acquires the write lock by sending a value on writableChan @@ -79,6 +82,8 @@ type http2Client struct { fc *inFlow // sendQuotaPool provides flow control to outbound message. sendQuotaPool *quotaPool + // streamsQuota limits the max number of concurrent streams. + streamsQuota *quotaPool // The scheme used: https if TLS is on, http otherwise. scheme string @@ -89,7 +94,7 @@ type http2Client struct { state transportState // the state of underlying connection activeStreams map[uint32]*Stream // The max number of concurrent streams - maxStreams uint32 + maxStreams int // the per-stream outbound flow control window size set by the peer. 
streamSendQuota uint32 } @@ -111,6 +116,7 @@ func newHTTP2Client(addr string, opts *ConnectOptions) (_ ClientTransport, err e if connErr != nil { return nil, ConnectionErrorf("transport: %v", connErr) } + var authInfo credentials.AuthInfo for _, c := range opts.AuthOptions { if ccreds, ok := c.(credentials.TransportAuthenticator); ok { scheme = "https" @@ -121,7 +127,7 @@ func newHTTP2Client(addr string, opts *ConnectOptions) (_ ClientTransport, err e if timeout > 0 { timeout -= time.Since(startT) } - conn, connErr = ccreds.ClientHandshake(addr, conn, timeout) + conn, authInfo, connErr = ccreds.ClientHandshake(addr, conn, timeout) break } } @@ -156,10 +162,16 @@ func newHTTP2Client(addr string, opts *ConnectOptions) (_ ClientTransport, err e return nil, ConnectionErrorf("transport: %v", err) } } + ua := primaryUA + if opts.UserAgent != "" { + ua = opts.UserAgent + " " + ua + } var buf bytes.Buffer t := &http2Client{ - target: addr, - conn: conn, + target: addr, + userAgent: ua, + conn: conn, + authInfo: authInfo, // The client initiated stream id is odd starting from 1. nextID: 1, writableChan: make(chan int, 1), @@ -174,8 +186,8 @@ func newHTTP2Client(addr string, opts *ConnectOptions) (_ ClientTransport, err e scheme: scheme, state: reachable, activeStreams: make(map[uint32]*Stream), - maxStreams: math.MaxUint32, authCreds: opts.AuthOptions, + maxStreams: math.MaxInt32, streamSendQuota: defaultWindowSize, } go t.controller() @@ -226,9 +238,26 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea return nil, ContextErr(context.DeadlineExceeded) } } + // Attach Auth info if there is any. + if t.authInfo != nil { + ctx = credentials.NewContext(ctx, t.authInfo) + } authData := make(map[string]string) for _, c := range t.authCreds { - data, err := c.GetRequestMetadata(ctx) + // Construct URI required to get auth request metadata. 
+ var port string + if pos := strings.LastIndex(t.target, ":"); pos != -1 { + // Omit port if it is the default one. + if t.target[pos+1:] != "443" { + port = ":" + t.target[pos+1:] + } + } + pos := strings.LastIndex(callHdr.Method, "/") + if pos == -1 { + return nil, StreamErrorf(codes.InvalidArgument, "transport: malformed method name: %q", callHdr.Method) + } + audience := "https://" + callHdr.Host + port + callHdr.Method[:pos] + data, err := c.GetRequestMetadata(ctx, audience) if err != nil { return nil, StreamErrorf(codes.InvalidArgument, "transport: %v", err) } @@ -236,7 +265,25 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea authData[k] = v } } + t.mu.Lock() + if t.state != reachable { + t.mu.Unlock() + return nil, ErrConnClosing + } + checkStreamsQuota := t.streamsQuota != nil + t.mu.Unlock() + if checkStreamsQuota { + sq, err := wait(ctx, t.shutdownChan, t.streamsQuota.acquire()) + if err != nil { + return nil, err + } + // Returns the quota balance back. + if sq > 1 { + t.streamsQuota.add(sq - 1) + } + } if _, err := wait(ctx, t.shutdownChan, t.writableChan); err != nil { + // t.streamsQuota will be updated when t.CloseStream is invoked. 
return nil, err } t.mu.Lock() @@ -244,11 +291,6 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea t.mu.Unlock() return nil, ErrConnClosing } - if uint32(len(t.activeStreams)) >= t.maxStreams { - t.mu.Unlock() - t.writableChan <- 0 - return nil, StreamErrorf(codes.Unavailable, "transport: failed to create new stream because the limit has been reached.") - } s := t.newStream(ctx, callHdr) t.activeStreams[s.id] = s t.mu.Unlock() @@ -261,7 +303,9 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea t.hEnc.WriteField(hpack.HeaderField{Name: ":path", Value: callHdr.Method}) t.hEnc.WriteField(hpack.HeaderField{Name: ":authority", Value: callHdr.Host}) t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"}) + t.hEnc.WriteField(hpack.HeaderField{Name: "user-agent", Value: t.userAgent}) t.hEnc.WriteField(hpack.HeaderField{Name: "te", Value: "trailers"}) + if timeout > 0 { t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-timeout", Value: timeoutEncode(timeout)}) } @@ -275,7 +319,9 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea if md, ok := metadata.FromContext(ctx); ok { hasMD = true for k, v := range md { - t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: v}) + for _, entry := range v { + t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: entry}) + } } } first := true @@ -316,9 +362,16 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea // CloseStream clears the footprint of a stream when the stream is not needed any more. // This must not be executed in reader's goroutine. 
func (t *http2Client) CloseStream(s *Stream, err error) { + var updateStreams bool t.mu.Lock() + if t.streamsQuota != nil { + updateStreams = true + } delete(t.activeStreams, s.id) t.mu.Unlock() + if updateStreams { + t.streamsQuota.add(1) + } s.mu.Lock() if q := s.fc.restoreConn(); q > 0 { t.controlBuf.put(&windowUpdate{0, q}) @@ -503,30 +556,46 @@ func (t *http2Client) handleData(f *http2.DataFrame) { return } size := len(f.Data()) - if err := s.fc.onData(uint32(size)); err != nil { - if _, ok := err.(ConnectionError); ok { - t.notifyError(err) - return - } - s.mu.Lock() - if s.state == streamDone { + if size > 0 { + if err := s.fc.onData(uint32(size)); err != nil { + if _, ok := err.(ConnectionError); ok { + t.notifyError(err) + return + } + s.mu.Lock() + if s.state == streamDone { + s.mu.Unlock() + return + } + s.state = streamDone + s.statusCode = codes.Internal + s.statusDesc = err.Error() s.mu.Unlock() + s.write(recvMsg{err: io.EOF}) + t.controlBuf.put(&resetStream{s.id, http2.ErrCodeFlowControl}) return } - s.state = streamDone + // TODO(bradfitz, zhaoq): A copy is required here because there is no + // guarantee f.Data() is consumed before the arrival of next frame. + // Can this copy be eliminated? + data := make([]byte, size) + copy(data, f.Data()) + s.write(recvMsg{data: data}) + } + // The server has closed the stream without sending trailers. Record that + // the read direction is closed, and set the status appropriately. 
+ if f.FrameHeader.Flags.Has(http2.FlagDataEndStream) { + s.mu.Lock() + if s.state == streamWriteDone { + s.state = streamDone + } else { + s.state = streamReadDone + } s.statusCode = codes.Internal - s.statusDesc = err.Error() + s.statusDesc = "server closed the stream without sending trailers" s.mu.Unlock() s.write(recvMsg{err: io.EOF}) - t.controlBuf.put(&resetStream{s.id, http2.ErrCodeFlowControl}) - return } - // TODO(bradfitz, zhaoq): A copy is required here because there is no - // guarantee f.Data() is consumed before the arrival of next frame. - // Can this copy be eliminated? - data := make([]byte, size) - copy(data, f.Data()) - s.write(recvMsg{data: data}) } func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) { @@ -540,6 +609,10 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) { return } s.state = streamDone + if !s.headerDone { + close(s.headerChan) + s.headerDone = true + } s.statusCode, ok = http2RSTErrConvTab[http2.ErrCode(f.ErrCode)] if !ok { grpclog.Println("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error ", f.ErrCode) @@ -552,24 +625,13 @@ func (t *http2Client) handleSettings(f *http2.SettingsFrame) { if f.IsAck() { return } + var ss []http2.Setting f.ForeachSetting(func(s http2.Setting) error { - if v, ok := f.Value(s.ID); ok { - t.mu.Lock() - defer t.mu.Unlock() - switch s.ID { - case http2.SettingMaxConcurrentStreams: - t.maxStreams = v - case http2.SettingInitialWindowSize: - for _, s := range t.activeStreams { - // Adjust the sending quota for each s. - s.sendQuotaPool.reset(int(v - t.streamSendQuota)) - } - t.streamSendQuota = v - } - } + ss = append(ss, s) return nil }) - t.controlBuf.put(&settings{ack: true}) + // The settings will be applied once the ack is sent. 
+ t.controlBuf.put(&settings{ack: true, ss: ss}) } func (t *http2Client) handlePing(f *http2.PingFrame) { @@ -577,7 +639,7 @@ func (t *http2Client) handlePing(f *http2.PingFrame) { } func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { - // TODO(zhaoq): GoAwayFrame handler to be implemented" + // TODO(zhaoq): GoAwayFrame handler to be implemented } func (t *http2Client) handleWindowUpdate(f *http2.WindowUpdateFrame) { @@ -697,6 +759,39 @@ func (t *http2Client) reader() { } } +func (t *http2Client) applySettings(ss []http2.Setting) { + for _, s := range ss { + switch s.ID { + case http2.SettingMaxConcurrentStreams: + // TODO(zhaoq): This is a hack to avoid significant refactoring of the + // code to deal with the unrealistic int32 overflow. Probably will try + // to find a better way to handle this later. + if s.Val > math.MaxInt32 { + s.Val = math.MaxInt32 + } + t.mu.Lock() + reset := t.streamsQuota != nil + if !reset { + t.streamsQuota = newQuotaPool(int(s.Val) - len(t.activeStreams)) + } + ms := t.maxStreams + t.maxStreams = int(s.Val) + t.mu.Unlock() + if reset { + t.streamsQuota.reset(int(s.Val) - ms) + } + case http2.SettingInitialWindowSize: + t.mu.Lock() + for _, stream := range t.activeStreams { + // Adjust the sending quota for each stream. + stream.sendQuotaPool.reset(int(s.Val - t.streamSendQuota)) + } + t.streamSendQuota = s.Val + t.mu.Unlock() + } + } +} + // controller running in a separate goroutine takes charge of sending control // frames (e.g., window update, reset stream, setting, etc.) to the server. func (t *http2Client) controller() { @@ -712,8 +807,9 @@ func (t *http2Client) controller() { case *settings: if i.ack { t.framer.writeSettingsAck(true) + t.applySettings(i.ss) } else { - t.framer.writeSettings(true, i.setting...) + t.framer.writeSettings(true, i.ss...) 
} case *resetStream: t.framer.writeRSTStream(true, i.streamID, i.code) diff --git a/Godeps/_workspace/src/google.golang.org/grpc/transport/http2_server.go b/Godeps/_workspace/src/google.golang.org/grpc/transport/http2_server.go index d9c0e11174a..8856d7f4c10 100644 --- a/Godeps/_workspace/src/google.golang.org/grpc/transport/http2_server.go +++ b/Godeps/_workspace/src/google.golang.org/grpc/transport/http2_server.go @@ -46,6 +46,7 @@ import ( "github.com/bradfitz/http2/hpack" "golang.org/x/net/context" "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/metadata" ) @@ -57,7 +58,8 @@ var ErrIllegalHeaderWrite = errors.New("transport: the stream is done or WriteHe // http2Server implements the ServerTransport interface with HTTP2. type http2Server struct { conn net.Conn - maxStreamID uint32 // max stream ID ever seen + maxStreamID uint32 // max stream ID ever seen + authInfo credentials.AuthInfo // auth info about the connection // writableChan synchronizes write access to the transport. // A writer acquires the write lock by sending a value on writableChan // and releases it by receiving from writableChan. @@ -88,11 +90,9 @@ type http2Server struct { // newHTTP2Server constructs a ServerTransport based on HTTP2. ConnectionError is // returned if something goes wrong. -func newHTTP2Server(conn net.Conn, maxStreams uint32) (_ ServerTransport, err error) { +func newHTTP2Server(conn net.Conn, maxStreams uint32, authInfo credentials.AuthInfo) (_ ServerTransport, err error) { framer := newFramer(conn) // Send initial settings as connection preface to client. - // TODO(zhaoq): Have a better way to signal "no limit" because 0 is - // permitted in the HTTP2 spec. var settings []http2.Setting // TODO(zhaoq): Have a better way to signal "no limit" because 0 is // permitted in the HTTP2 spec. 
@@ -116,6 +116,7 @@ func newHTTP2Server(conn net.Conn, maxStreams uint32) (_ ServerTransport, err er var buf bytes.Buffer t := &http2Server{ conn: conn, + authInfo: authInfo, framer: framer, hBuf: &buf, hEnc: hpack.NewEncoder(&buf), @@ -183,6 +184,10 @@ func (t *http2Server) operateHeaders(hDec *hpackDecoder, s *Stream, frame header } else { s.ctx, s.cancel = context.WithCancel(context.TODO()) } + // Attach Auth info if there is any. + if t.authInfo != nil { + s.ctx = credentials.NewContext(s.ctx, t.authInfo) + } // Cache the current stream to the context so that the server application // can find out. Required when the server wants to send some metadata // back to the client (unary call only). @@ -324,22 +329,24 @@ func (t *http2Server) handleData(f *http2.DataFrame) { return } size := len(f.Data()) - if err := s.fc.onData(uint32(size)); err != nil { - if _, ok := err.(ConnectionError); ok { - grpclog.Printf("transport: http2Server %v", err) - t.Close() + if size > 0 { + if err := s.fc.onData(uint32(size)); err != nil { + if _, ok := err.(ConnectionError); ok { + grpclog.Printf("transport: http2Server %v", err) + t.Close() + return + } + t.closeStream(s) + t.controlBuf.put(&resetStream{s.id, http2.ErrCodeFlowControl}) return } - t.closeStream(s) - t.controlBuf.put(&resetStream{s.id, http2.ErrCodeFlowControl}) - return + // TODO(bradfitz, zhaoq): A copy is required here because there is no + // guarantee f.Data() is consumed before the arrival of next frame. + // Can this copy be eliminated? + data := make([]byte, size) + copy(data, f.Data()) + s.write(recvMsg{data: data}) } - // TODO(bradfitz, zhaoq): A copy is required here because there is no - // guarantee f.Data() is consumed before the arrival of next frame. - // Can this copy be eliminated? - data := make([]byte, size) - copy(data, f.Data()) - s.write(recvMsg{data: data}) if f.Header().Flags.Has(http2.FlagDataEndStream) { // Received the end of stream from the client. 
s.mu.Lock() @@ -367,18 +374,13 @@ func (t *http2Server) handleSettings(f *http2.SettingsFrame) { if f.IsAck() { return } + var ss []http2.Setting f.ForeachSetting(func(s http2.Setting) error { - if v, ok := f.Value(http2.SettingInitialWindowSize); ok { - t.mu.Lock() - defer t.mu.Unlock() - for _, s := range t.activeStreams { - s.sendQuotaPool.reset(int(v - t.streamSendQuota)) - } - t.streamSendQuota = v - } + ss = append(ss, s) return nil }) - t.controlBuf.put(&settings{ack: true}) + // The settings will be applied once the ack is sent. + t.controlBuf.put(&settings{ack: true, ss: ss}) } func (t *http2Server) handlePing(f *http2.PingFrame) { @@ -445,7 +447,9 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { t.hEnc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"}) for k, v := range md { - t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: v}) + for _, entry := range v { + t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: entry}) + } } if err := t.writeHeaders(s, t.hBuf, false); err != nil { return err @@ -478,7 +482,9 @@ func (t *http2Server) WriteStatus(s *Stream, statusCode codes.Code, statusDesc s t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-message", Value: statusDesc}) // Attach the trailer metadata. 
for k, v := range s.trailer { - t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: v}) + for _, entry := range v { + t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: entry}) + } } if err := t.writeHeaders(s, t.hBuf, true); err != nil { t.Close() @@ -584,6 +590,20 @@ func (t *http2Server) Write(s *Stream, data []byte, opts *Options) error { } +func (t *http2Server) applySettings(ss []http2.Setting) { + for _, s := range ss { + if s.ID == http2.SettingInitialWindowSize { + t.mu.Lock() + defer t.mu.Unlock() + for _, stream := range t.activeStreams { + stream.sendQuotaPool.reset(int(s.Val - t.streamSendQuota)) + } + t.streamSendQuota = s.Val + } + + } +} + // controller running in a separate goroutine takes charge of sending control // frames (e.g., window update, reset stream, setting, etc.) to the server. func (t *http2Server) controller() { @@ -599,8 +619,9 @@ func (t *http2Server) controller() { case *settings: if i.ack { t.framer.writeSettingsAck(true) + t.applySettings(i.ss) } else { - t.framer.writeSettings(true, i.setting...) + t.framer.writeSettings(true, i.ss...) } case *resetStream: t.framer.writeRSTStream(true, i.streamID, i.code) diff --git a/Godeps/_workspace/src/google.golang.org/grpc/transport/http_util.go b/Godeps/_workspace/src/google.golang.org/grpc/transport/http_util.go index 2babe729c2d..ac3c47551a1 100644 --- a/Godeps/_workspace/src/google.golang.org/grpc/transport/http_util.go +++ b/Godeps/_workspace/src/google.golang.org/grpc/transport/http_util.go @@ -39,6 +39,7 @@ import ( "io" "net" "strconv" + "strings" "sync/atomic" "time" @@ -50,6 +51,8 @@ import ( ) const ( + // The primary user agent + primaryUA = "grpc-go/0.7" // http2MaxFrameLen specifies the max length of a HTTP2 frame. 
http2MaxFrameLen = 16384 // 16KB frame // http://http2.github.io/http2-spec/#SettingValues @@ -59,32 +62,30 @@ const ( ) var ( - clientPreface = []byte(http2.ClientPreface) + clientPreface = []byte(http2.ClientPreface) + http2RSTErrConvTab = map[http2.ErrCode]codes.Code{ + http2.ErrCodeNo: codes.Internal, + http2.ErrCodeProtocol: codes.Internal, + http2.ErrCodeInternal: codes.Internal, + http2.ErrCodeFlowControl: codes.ResourceExhausted, + http2.ErrCodeSettingsTimeout: codes.Internal, + http2.ErrCodeFrameSize: codes.Internal, + http2.ErrCodeRefusedStream: codes.Unavailable, + http2.ErrCodeCancel: codes.Canceled, + http2.ErrCodeCompression: codes.Internal, + http2.ErrCodeConnect: codes.Internal, + http2.ErrCodeEnhanceYourCalm: codes.ResourceExhausted, + http2.ErrCodeInadequateSecurity: codes.PermissionDenied, + } + statusCodeConvTab = map[codes.Code]http2.ErrCode{ + codes.Internal: http2.ErrCodeInternal, + codes.Canceled: http2.ErrCodeCancel, + codes.Unavailable: http2.ErrCodeRefusedStream, + codes.ResourceExhausted: http2.ErrCodeEnhanceYourCalm, + codes.PermissionDenied: http2.ErrCodeInadequateSecurity, + } ) -var http2RSTErrConvTab = map[http2.ErrCode]codes.Code{ - http2.ErrCodeNo: codes.Internal, - http2.ErrCodeProtocol: codes.Internal, - http2.ErrCodeInternal: codes.Internal, - http2.ErrCodeFlowControl: codes.Internal, - http2.ErrCodeSettingsTimeout: codes.Internal, - http2.ErrCodeFrameSize: codes.Internal, - http2.ErrCodeRefusedStream: codes.Unavailable, - http2.ErrCodeCancel: codes.Canceled, - http2.ErrCodeCompression: codes.Internal, - http2.ErrCodeConnect: codes.Internal, - http2.ErrCodeEnhanceYourCalm: codes.ResourceExhausted, - http2.ErrCodeInadequateSecurity: codes.PermissionDenied, -} - -var statusCodeConvTab = map[codes.Code]http2.ErrCode{ - codes.Internal: http2.ErrCodeInternal, // pick an arbitrary one which is matched. 
- codes.Canceled: http2.ErrCodeCancel, - codes.Unavailable: http2.ErrCodeRefusedStream, - codes.ResourceExhausted: http2.ErrCodeEnhanceYourCalm, - codes.PermissionDenied: http2.ErrCodeInadequateSecurity, -} - // Records the states during HPACK decoding. Must be reset once the // decoding of the entire headers are finished. type decodeState struct { @@ -97,7 +98,7 @@ type decodeState struct { timeout time.Duration method string // key-value metadata map from the peer. - mdata map[string]string + mdata map[string][]string } // An hpackDecoder decodes HTTP2 headers which may span multiple frames. @@ -128,8 +129,7 @@ func isReservedHeader(hdr string) bool { "grpc-message", "grpc-status", "grpc-timeout", - "te", - "user-agent": + "te": return true default: return false @@ -161,15 +161,24 @@ func newHPACKDecoder() *hpackDecoder { d.state.method = f.Value default: if !isReservedHeader(f.Name) { + if f.Name == "user-agent" { + i := strings.LastIndex(f.Value, " ") + if i == -1 { + // There is no application user agent string being set. + return + } + // Extract the application user agent string. + f.Value = f.Value[:i] + } if d.state.mdata == nil { - d.state.mdata = make(map[string]string) + d.state.mdata = make(map[string][]string) } k, v, err := metadata.DecodeKeyValue(f.Name, f.Value) if err != nil { grpclog.Printf("Failed to decode (%q, %q): %v", f.Name, f.Value, err) return } - d.state.mdata[k] = v + d.state.mdata[k] = append(d.state.mdata[k], v) } } }) diff --git a/Godeps/_workspace/src/google.golang.org/grpc/transport/transport.go b/Godeps/_workspace/src/google.golang.org/grpc/transport/transport.go index 4b52d4a2cdb..e20a6d99232 100644 --- a/Godeps/_workspace/src/google.golang.org/grpc/transport/transport.go +++ b/Godeps/_workspace/src/google.golang.org/grpc/transport/transport.go @@ -169,10 +169,9 @@ type Stream struct { ctx context.Context cancel context.CancelFunc // method records the associated RPC method of the stream. 
- method string - buf *recvBuffer - dec io.Reader - + method string + buf *recvBuffer + dec io.Reader fc *inFlow recvQuota uint32 // The accumulated inbound quota pending for window update. @@ -309,15 +308,20 @@ const ( // NewServerTransport creates a ServerTransport with conn or non-nil error // if it fails. -func NewServerTransport(protocol string, conn net.Conn, maxStreams uint32) (ServerTransport, error) { - return newHTTP2Server(conn, maxStreams) +func NewServerTransport(protocol string, conn net.Conn, maxStreams uint32, authInfo credentials.AuthInfo) (ServerTransport, error) { + return newHTTP2Server(conn, maxStreams, authInfo) } // ConnectOptions covers all relevant options for dialing a server. type ConnectOptions struct { - Dialer func(string, time.Duration) (net.Conn, error) + // UserAgent is the application user agent. + UserAgent string + // Dialer specifies how to dial a network address. + Dialer func(string, time.Duration) (net.Conn, error) + // AuthOptions stores the credentials required to setup a client connection and/or issue RPCs. AuthOptions []credentials.Credentials - Timeout time.Duration + // Timeout specifies the timeout for dialing a client connection. + Timeout time.Duration } // NewClientTransport establishes the transport with the required ConnectOptions diff --git a/pkg/kubelet/rkt/fake_rkt_interface.go b/pkg/kubelet/rkt/fake_rkt_interface.go new file mode 100644 index 00000000000..c7a0a87df04 --- /dev/null +++ b/pkg/kubelet/rkt/fake_rkt_interface.go @@ -0,0 +1,123 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package rkt + +import ( + "fmt" + "strconv" + "sync" + + "github.com/coreos/go-systemd/dbus" + rktapi "github.com/coreos/rkt/api/v1alpha" + "golang.org/x/net/context" + "google.golang.org/grpc" +) + +// fakeRktInterface mocks the rktapi.PublicAPIClient interface for testing purpose. +type fakeRktInterface struct { + sync.Mutex + info rktapi.Info + called []string + err error +} + +func newFakeRktInterface() *fakeRktInterface { + return &fakeRktInterface{} +} + +func (f *fakeRktInterface) CleanCalls() { + f.Lock() + defer f.Unlock() + f.called = nil +} + +func (f *fakeRktInterface) GetInfo(ctx context.Context, in *rktapi.GetInfoRequest, opts ...grpc.CallOption) (*rktapi.GetInfoResponse, error) { + f.Lock() + defer f.Unlock() + + f.called = append(f.called, "GetInfo") + return &rktapi.GetInfoResponse{&f.info}, f.err +} + +func (f *fakeRktInterface) ListPods(ctx context.Context, in *rktapi.ListPodsRequest, opts ...grpc.CallOption) (*rktapi.ListPodsResponse, error) { + return nil, fmt.Errorf("Not implemented") +} + +func (f *fakeRktInterface) InspectPod(ctx context.Context, in *rktapi.InspectPodRequest, opts ...grpc.CallOption) (*rktapi.InspectPodResponse, error) { + return nil, fmt.Errorf("Not implemented") +} + +func (f *fakeRktInterface) ListImages(ctx context.Context, in *rktapi.ListImagesRequest, opts ...grpc.CallOption) (*rktapi.ListImagesResponse, error) { + return nil, fmt.Errorf("Not implemented") +} + +func (f *fakeRktInterface) InspectImage(ctx context.Context, in *rktapi.InspectImageRequest, opts ...grpc.CallOption) (*rktapi.InspectImageResponse, error) { + 
return nil, fmt.Errorf("Not implemented") +} + +func (f *fakeRktInterface) ListenEvents(ctx context.Context, in *rktapi.ListenEventsRequest, opts ...grpc.CallOption) (rktapi.PublicAPI_ListenEventsClient, error) { + return nil, fmt.Errorf("Not implemented") +} + +func (f *fakeRktInterface) GetLogs(ctx context.Context, in *rktapi.GetLogsRequest, opts ...grpc.CallOption) (rktapi.PublicAPI_GetLogsClient, error) { + return nil, fmt.Errorf("Not implemented") +} + +// fakeSystemd mocks the systemdInterface for testing purpose. +// TODO(yifan): Remove this once we have a package for launching rkt pods. +// See https://github.com/coreos/rkt/issues/1769. +type fakeSystemd struct { + sync.Mutex + called []string + version string + err error +} + +func newFakeSystemd() *fakeSystemd { + return &fakeSystemd{} +} + +func (f *fakeSystemd) CleanCalls() { + f.Lock() + defer f.Unlock() + f.called = nil +} + +func (f *fakeSystemd) Version() (systemdVersion, error) { + f.Lock() + defer f.Unlock() + + f.called = append(f.called, "Version") + v, _ := strconv.Atoi(f.version) + return systemdVersion(v), f.err +} + +func (f *fakeSystemd) ListUnits() ([]dbus.UnitStatus, error) { + return nil, fmt.Errorf("Not implemented") +} + +func (f *fakeSystemd) StopUnit(name, mode string) (string, error) { + return "", fmt.Errorf("Not implemented") +} + +func (f *fakeSystemd) RestartUnit(name, mode string) (string, error) { + return "", fmt.Errorf("Not implemented") +} + +func (f *fakeSystemd) Reload() error { + return fmt.Errorf("Not implemented") +} diff --git a/pkg/kubelet/rkt/rkt.go b/pkg/kubelet/rkt/rkt.go index 84acf4ffc80..c5195fbbce0 100644 --- a/pkg/kubelet/rkt/rkt.go +++ b/pkg/kubelet/rkt/rkt.go @@ -30,10 +30,12 @@ import ( "syscall" "time" + "google.golang.org/grpc" + appcschema "github.com/appc/spec/schema" appctypes "github.com/appc/spec/schema/types" - "github.com/coreos/go-systemd/dbus" "github.com/coreos/go-systemd/unit" + rktapi "github.com/coreos/rkt/api/v1alpha" 
"github.com/docker/docker/pkg/parsers" docker "github.com/fsouza/go-dockerclient" "github.com/golang/glog" @@ -53,10 +55,11 @@ import ( const ( RktType = "rkt" - acVersion = "0.7.1" - minimumRktVersion = "0.9.0" - recommendRktVersion = "0.9.0" - systemdMinimumVersion = "219" + minimumAppcVersion = "0.7.1" + minimumRktBinVersion = "0.9.0" + recommendedRktBinVersion = "0.9.0" + minimumRktApiVersion = "1.0.0-alpha" + minimumSystemdVersion = "219" systemdServiceDir = "/run/systemd/system" rktDataDir = "/var/lib/rkt" @@ -73,14 +76,18 @@ const ( authDir = "auth.d" dockerAuthTemplate = `{"rktKind":"dockerAuth","rktVersion":"v1","registries":[%q],"credentials":{"user":%q,"password":%q}}` - defaultImageTag = "latest" + defaultImageTag = "latest" + defaultRktAPIServiceAddr = "localhost:15441" ) // Runtime implements the Containerruntime for rkt. The implementation // uses systemd, so in order to run this runtime, systemd must be installed // on the machine. type Runtime struct { - systemd *dbus.Conn + systemd systemdInterface + // The grpc client for rkt api-service. + apisvcConn *grpc.ClientConn + apisvc rktapi.PublicAPIClient // The absolute path to rkt binary. rktBinAbsPath string config *Config @@ -93,6 +100,12 @@ type Runtime struct { livenessManager proberesults.Manager volumeGetter volumeGetter imagePuller kubecontainer.ImagePuller + + // Versions + binVersion rktVersion + apiVersion rktVersion + appcVersion rktVersion + systemdVersion systemdVersion } var _ kubecontainer.Runtime = &Runtime{} @@ -114,21 +127,16 @@ func New(config *Config, imageBackOff *util.Backoff, serializeImagePulls bool, ) (*Runtime, error) { - systemdVersion, err := getSystemdVersion() + // Create dbus connection. 
+ systemd, err := newSystemd() if err != nil { - return nil, err - } - result, err := systemdVersion.Compare(systemdMinimumVersion) - if err != nil { - return nil, err - } - if result < 0 { - return nil, fmt.Errorf("rkt: systemd version is too old, requires at least %v", systemdMinimumVersion) + return nil, fmt.Errorf("rkt: cannot create systemd interface: %v", err) } - systemd, err := dbus.New() + // TODO(yifan): Use secure connection. + apisvcConn, err := grpc.Dial(defaultRktAPIServiceAddr, grpc.WithInsecure()) if err != nil { - return nil, fmt.Errorf("cannot connect to dbus: %v", err) + return nil, fmt.Errorf("rkt: cannot connect to rkt api service: %v", err) } rktBinAbsPath := config.Path @@ -144,6 +152,8 @@ func New(config *Config, rkt := &Runtime{ systemd: systemd, rktBinAbsPath: rktBinAbsPath, + apisvcConn: apisvcConn, + apisvc: rktapi.NewPublicAPIClient(apisvcConn), config: config, dockerKeyring: credentialprovider.NewDockerKeyring(), containerRefManager: containerRefManager, @@ -158,28 +168,13 @@ func New(config *Config, rkt.imagePuller = kubecontainer.NewImagePuller(recorder, rkt, imageBackOff) } - // Test the rkt version. - version, err := rkt.Version() - if err != nil { + if err := rkt.checkVersion(minimumRktBinVersion, recommendedRktBinVersion, minimumAppcVersion, minimumRktApiVersion, minimumSystemdVersion); err != nil { + // TODO(yifan): Latest go-systemd version have the ability to close the + // dbus connection. However the 'docker/libcontainer' package is using + // the older go-systemd version, so we can't update the go-systemd version. 
+ rkt.apisvcConn.Close() return nil, err } - result, err = version.Compare(minimumRktVersion) - if err != nil { - return nil, err - } - if result < 0 { - return nil, fmt.Errorf("rkt: version is too old, requires at least %v", minimumRktVersion) - } - - result, err = version.Compare(recommendRktVersion) - if err != nil { - return nil, err - } - if result != 0 { - // TODO(yifan): Record an event to expose the information. - glog.Warningf("rkt: current version %q is not recommended (recommended version %q)", version, recommendRktVersion) - } - return rkt, nil } @@ -880,33 +875,8 @@ func (r *Runtime) Type() string { return RktType } -// Version invokes 'rkt version' to get the version information of the rkt -// runtime on the machine. -// The return values are an int array containers the version number. -// -// Example: -// rkt:0.3.2+git --> []int{0, 3, 2}. -// func (r *Runtime) Version() (kubecontainer.Version, error) { - output, err := r.runCommand("version") - if err != nil { - return nil, err - } - - // Example output for 'rkt version': - // rkt version 0.3.2+git - // appc version 0.3.0+git - for _, line := range output { - tuples := strings.Split(strings.TrimSpace(line), " ") - if len(tuples) != 3 { - glog.Warningf("rkt: cannot parse the output: %q.", line) - continue - } - if tuples[0] == "rkt" { - return parseVersion(tuples[2]) - } - } - return nil, fmt.Errorf("rkt: cannot determine the version") + return r.binVersion, nil } // TODO(yifan): This is very racy, unefficient, and unsafe, we need to provide diff --git a/pkg/kubelet/rkt/rkt_test.go b/pkg/kubelet/rkt/rkt_test.go new file mode 100644 index 00000000000..471d795fd85 --- /dev/null +++ b/pkg/kubelet/rkt/rkt_test.go @@ -0,0 +1,146 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package rkt + +import ( + "fmt" + "testing" + + rktapi "github.com/coreos/rkt/api/v1alpha" + "github.com/stretchr/testify/assert" +) + +func TestCheckVersion(t *testing.T) { + fr := newFakeRktInterface() + fs := newFakeSystemd() + r := &Runtime{apisvc: fr, systemd: fs} + + fr.info = rktapi.Info{ + RktVersion: "1.2.3+git", + AppcVersion: "1.2.4+git", + ApiVersion: "1.2.6-alpha", + } + fs.version = "100" + tests := []struct { + minimumRktBinVersion string + recommendedRktBinVersion string + minimumAppcVersion string + minimumRktApiVersion string + minimumSystemdVersion string + err error + calledGetInfo bool + calledSystemVersion bool + }{ + // Good versions. + { + "1.2.3", + "1.2.3", + "1.2.4", + "1.2.5", + "99", + nil, + true, + true, + }, + // Good versions. + { + "1.2.3+git", + "1.2.3+git", + "1.2.4+git", + "1.2.6-alpha", + "100", + nil, + true, + true, + }, + // Requires greater binary version. + { + "1.2.4", + "1.2.4", + "1.2.4", + "1.2.6-alpha", + "100", + fmt.Errorf("rkt: binary version is too old(%v), requires at least %v", fr.info.RktVersion, "1.2.4"), + true, + true, + }, + // Requires greater Appc version. + { + "1.2.3", + "1.2.3", + "1.2.5", + "1.2.6-alpha", + "100", + fmt.Errorf("rkt: appc version is too old(%v), requires at least %v", fr.info.AppcVersion, "1.2.5"), + true, + true, + }, + // Requires greater API version. + { + "1.2.3", + "1.2.3", + "1.2.4", + "1.2.6", + "100", + fmt.Errorf("rkt: API version is too old(%v), requires at least %v", fr.info.ApiVersion, "1.2.6"), + true, + true, + }, + // Requires greater API version. 
+ { + "1.2.3", + "1.2.3", + "1.2.4", + "1.2.7", + "100", + fmt.Errorf("rkt: API version is too old(%v), requires at least %v", fr.info.ApiVersion, "1.2.7"), + true, + true, + }, + // Requires greater systemd version. + { + "1.2.3", + "1.2.3", + "1.2.4", + "1.2.7", + "101", + fmt.Errorf("rkt: systemd version(%v) is too old, requires at least %v", fs.version, "101"), + false, + true, + }, + } + + for i, tt := range tests { + testCaseHint := fmt.Sprintf("test case #%d", i) + err := r.checkVersion(tt.minimumRktBinVersion, tt.recommendedRktBinVersion, tt.minimumAppcVersion, tt.minimumRktApiVersion, tt.minimumSystemdVersion) + assert.Equal(t, err, tt.err, testCaseHint) + + if tt.calledGetInfo { + assert.Equal(t, fr.called, []string{"GetInfo"}, testCaseHint) + } + if tt.calledSystemVersion { + assert.Equal(t, fs.called, []string{"Version"}, testCaseHint) + } + if err == nil { + assert.Equal(t, r.binVersion.String(), fr.info.RktVersion, testCaseHint) + assert.Equal(t, r.appcVersion.String(), fr.info.AppcVersion, testCaseHint) + assert.Equal(t, r.apiVersion.String(), fr.info.ApiVersion, testCaseHint) + } + fr.CleanCalls() + fs.CleanCalls() + } +} diff --git a/pkg/kubelet/rkt/systemd.go b/pkg/kubelet/rkt/systemd.go new file mode 100644 index 00000000000..0534d2148b4 --- /dev/null +++ b/pkg/kubelet/rkt/systemd.go @@ -0,0 +1,103 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package rkt
+
+import (
+	"fmt"
+	"os/exec"
+	"strconv"
+	"strings"
+
+	"github.com/coreos/go-systemd/dbus"
+)
+
+// systemdVersion is a type that wraps int to implement the kubecontainer.Version interface.
+type systemdVersion int
+
+func (s systemdVersion) String() string {
+	return fmt.Sprintf("%d", s)
+}
+
+func (s systemdVersion) Compare(other string) (int, error) {
+	v, err := strconv.Atoi(other)
+	if err != nil {
+		return -1, err
+	}
+	if int(s) < v {
+		return -1, nil
+	} else if int(s) > v {
+		return 1, nil
+	}
+	return 0, nil
+}
+
+// systemdInterface is an abstraction of the go-systemd/dbus to make
+// it mockable for testing.
+// TODO(yifan): Eventually we should move these functionalities to:
+// 1. a package for launching/stopping rkt pods.
+// 2. rkt api-service interface for listing pods.
+// See https://github.com/coreos/rkt/issues/1769.
+type systemdInterface interface {
+	// Version returns the version of the systemd.
+	Version() (systemdVersion, error)
+	// ListUnits lists all the loaded units.
+	ListUnits() ([]dbus.UnitStatus, error)
+	// StopUnit stops the unit with the given name.
+	StopUnit(name, mode string) (string, error)
+	// RestartUnit restarts the unit with the given name.
+	RestartUnit(name, mode string) (string, error)
+	// Reload is equivalent to 'systemctl daemon-reload'.
+	Reload() error
+}
+
+// systemd implements the systemdInterface using dbus and systemctl.
+// All the functions other than Version() are already implemented by go-systemd/dbus.
+type systemd struct {
+	*dbus.Conn
+}
+
+// newSystemd creates a systemd object that implements systemdInterface.
+func newSystemd() (*systemd, error) {
+	dbusConn, err := dbus.New()
+	if err != nil {
+		return nil, err
+	}
+	return &systemd{dbusConn}, nil
+}
+
+// Version returns the version of the systemd.
+func (s *systemd) Version() (systemdVersion, error) { + output, err := exec.Command("systemctl", "--version").Output() + if err != nil { + return -1, err + } + // Example output of 'systemctl --version': + // + // systemd 215 + // +PAM +AUDIT +SELINUX +IMA +SYSVINIT +LIBCRYPTSETUP +GCRYPT +ACL +XZ -SECCOMP -APPARMOR + // + lines := strings.Split(string(output), "\n") + tuples := strings.Split(lines[0], " ") + if len(tuples) != 2 { + return -1, fmt.Errorf("rkt: Failed to parse version %v", lines) + } + result, err := strconv.Atoi(string(tuples[1])) + if err != nil { + return -1, err + } + return systemdVersion(result), nil +} diff --git a/pkg/kubelet/rkt/version.go b/pkg/kubelet/rkt/version.go index 593d692c4df..4a6aaed7d77 100644 --- a/pkg/kubelet/rkt/version.go +++ b/pkg/kubelet/rkt/version.go @@ -18,93 +18,111 @@ package rkt import ( "fmt" - "os/exec" - "strconv" - "strings" - appctypes "github.com/appc/spec/schema/types" + "github.com/coreos/go-semver/semver" + rktapi "github.com/coreos/rkt/api/v1alpha" + "github.com/golang/glog" + "golang.org/x/net/context" ) -type rktVersion []int +// rktVersion implementes kubecontainer.Version interface by implementing +// Compare() and String() (which is implemented by the underlying semver.Version) +type rktVersion struct { + *semver.Version +} -func parseVersion(input string) (rktVersion, error) { - nsv, err := appctypes.NewSemVer(input) +func newRktVersion(version string) (rktVersion, error) { + sem, err := semver.NewVersion(version) if err != nil { - return nil, err + return rktVersion{}, err } - return rktVersion{int(nsv.Major), int(nsv.Minor), int(nsv.Patch)}, nil + return rktVersion{sem}, nil } func (r rktVersion) Compare(other string) (int, error) { - v, err := parseVersion(other) + v, err := semver.NewVersion(other) if err != nil { return -1, err } - for i := range r { - if i > len(v)-1 { - return 1, nil - } - if r[i] < v[i] { - return -1, nil - } - if r[i] > v[i] { - return 1, nil - } - } - - // When loop ends, 
len(r) is <= len(v). - if len(r) < len(v) { + if r.LessThan(*v) { return -1, nil } - return 0, nil -} - -func (r rktVersion) String() string { - var version []string - for _, v := range r { - version = append(version, fmt.Sprintf("%d", v)) - } - return strings.Join(version, ".") -} - -type systemdVersion int - -func (s systemdVersion) String() string { - return fmt.Sprintf("%d", s) -} - -func (s systemdVersion) Compare(other string) (int, error) { - v, err := strconv.Atoi(other) - if err != nil { - return -1, err - } - if int(s) < v { - return -1, nil - } else if int(s) > v { + if v.LessThan(*r.Version) { return 1, nil } return 0, nil } -func getSystemdVersion() (systemdVersion, error) { - output, err := exec.Command("systemctl", "--version").Output() +// checkVersion tests whether the rkt/systemd/rkt-api-service that meet the version requirement. +// If all version requirements are met, it returns nil. +func (r *Runtime) checkVersion(minimumRktBinVersion, recommendedRktBinVersion, minimumAppcVersion, minimumRktApiVersion, minimumSystemdVersion string) error { + // Check systemd version. 
+ var err error + r.systemdVersion, err = r.systemd.Version() if err != nil { - return -1, err + return err } - // Example output of 'systemctl --version': - // - // systemd 215 - // +PAM +AUDIT +SELINUX +IMA +SYSVINIT +LIBCRYPTSETUP +GCRYPT +ACL +XZ -SECCOMP -APPARMOR - // - lines := strings.Split(string(output), "\n") - tuples := strings.Split(lines[0], " ") - if len(tuples) != 2 { - return -1, fmt.Errorf("rkt: Failed to parse version %v", lines) - } - result, err := strconv.Atoi(string(tuples[1])) + result, err := r.systemdVersion.Compare(minimumSystemdVersion) if err != nil { - return -1, err + return err } - return systemdVersion(result), nil + if result < 0 { + return fmt.Errorf("rkt: systemd version(%v) is too old, requires at least %v", r.systemdVersion, minimumSystemdVersion) + } + + // Example for the version strings returned by GetInfo(): + // RktVersion:"0.10.0+gitb7349b1" AppcVersion:"0.7.1" ApiVersion:"1.0.0-alpha" + resp, err := r.apisvc.GetInfo(context.Background(), &rktapi.GetInfoRequest{}) + if err != nil { + return err + } + + // Check rkt binary version. + r.binVersion, err = newRktVersion(resp.Info.RktVersion) + if err != nil { + return err + } + result, err = r.binVersion.Compare(minimumRktBinVersion) + if err != nil { + return err + } + if result < 0 { + return fmt.Errorf("rkt: binary version is too old(%v), requires at least %v", resp.Info.RktVersion, minimumRktBinVersion) + } + result, err = r.binVersion.Compare(recommendedRktBinVersion) + if err != nil { + return err + } + if result != 0 { + // TODO(yifan): Record an event to expose the information. + glog.Warningf("rkt: current binary version %q is not recommended (recommended version %q)", resp.Info.RktVersion, recommendedRktBinVersion) + } + + // Check Appc version. 
+ r.appcVersion, err = newRktVersion(resp.Info.AppcVersion) + if err != nil { + return err + } + result, err = r.appcVersion.Compare(minimumAppcVersion) + if err != nil { + return err + } + if result < 0 { + return fmt.Errorf("rkt: appc version is too old(%v), requires at least %v", resp.Info.AppcVersion, minimumAppcVersion) + } + + // Check rkt API version. + r.apiVersion, err = newRktVersion(resp.Info.ApiVersion) + if err != nil { + return err + } + result, err = r.apiVersion.Compare(minimumRktApiVersion) + if err != nil { + return err + } + if result < 0 { + return fmt.Errorf("rkt: API version is too old(%v), requires at least %v", resp.Info.ApiVersion, minimumRktApiVersion) + } + return nil }