commit c7083eed5d
@@ -89,7 +89,6 @@ func New(address string, opts ...ClientOpt) (*Client, error) {
     gopts := []grpc.DialOption{
         grpc.WithBlock(),
         grpc.WithInsecure(),
-        grpc.WithTimeout(60 * time.Second),
         grpc.FailOnNonTempDialError(true),
         grpc.WithBackoffMaxDelay(3 * time.Second),
         grpc.WithDialer(dialer.Dialer),
@@ -109,7 +108,9 @@ func New(address string, opts ...ClientOpt) (*Client, error) {
         )
     }
     connector := func() (*grpc.ClientConn, error) {
-        conn, err := grpc.Dial(dialer.DialAddress(address), gopts...)
+        ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
+        defer cancel()
+        conn, err := grpc.DialContext(ctx, dialer.DialAddress(address), gopts...)
         if err != nil {
             return nil, errors.Wrapf(err, "failed to dial %q", address)
         }
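The hunk above drops the deprecated grpc.WithTimeout dial option and carries the 60-second bound in a context handed to grpc.DialContext, so the caller owns cancellation. A minimal standalone sketch of the same pattern, assuming a placeholder address and a plain main wrapper (this is not containerd code):

```go
package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
)

// dial shows the migration used in this commit: the connection timeout is
// expressed as a context passed to grpc.DialContext instead of the
// deprecated grpc.WithTimeout dial option.
func dial(address string) (*grpc.ClientConn, error) {
	gopts := []grpc.DialOption{
		grpc.WithBlock(),    // block until connected or the context expires
		grpc.WithInsecure(), // plaintext, as with the local socket dialing above
	}
	ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
	defer cancel()
	return grpc.DialContext(ctx, address, gopts...)
}

func main() {
	conn, err := dial("localhost:50051") // placeholder address
	if err != nil {
		log.Fatalf("failed to dial: %v", err)
	}
	defer conn.Close()
}
```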
@@ -21,15 +21,11 @@ import (
     "context"
     "flag"
     "fmt"
-    "io/ioutil"
-    golog "log"
     "os"
     "os/exec"
     "testing"
     "time"
-
-    "google.golang.org/grpc/grpclog"
 
     "github.com/containerd/containerd/images"
     "github.com/containerd/containerd/log"
     "github.com/containerd/containerd/namespaces"
@@ -50,9 +46,6 @@ var (
 )
 
 func init() {
-    // Discard grpc logs so that they don't mess with our stdio
-    grpclog.SetLogger(golog.New(ioutil.Discard, "", golog.LstdFlags))
-
     flag.StringVar(&address, "address", defaultAddress, "The address to the containerd socket for use in the tests")
     flag.BoolVar(&noDaemon, "no-daemon", false, "Do not start a dedicated daemon for the tests")
     flag.BoolVar(&noCriu, "no-criu", false, "Do not run the checkpoint tests")
@@ -20,7 +20,6 @@ import (
     gocontext "context"
     "fmt"
     "io/ioutil"
-    golog "log"
     "net"
     "os"
     "os/signal"
@@ -50,7 +49,7 @@ high performance container runtime
 
 func init() {
     // Discard grpc logs so that they don't mess with our stdio
-    grpclog.SetLogger(golog.New(ioutil.Discard, "", golog.LstdFlags))
+    grpclog.SetLoggerV2(grpclog.NewLoggerV2(ioutil.Discard, ioutil.Discard, ioutil.Discard))
 
     cli.VersionPrinter = func(c *cli.Context) {
         fmt.Println(c.App.Name, version.Package, c.App.Version, version.Revision)
@@ -95,12 +95,13 @@ func connect(address string, d func(string, time.Duration) (net.Conn, error)) (*
     gopts := []grpc.DialOption{
         grpc.WithBlock(),
         grpc.WithInsecure(),
-        grpc.WithTimeout(60 * time.Second),
         grpc.WithDialer(d),
         grpc.FailOnNonTempDialError(true),
         grpc.WithBackoffMaxDelay(3 * time.Second),
     }
-    conn, err := grpc.Dial(dialer.DialAddress(address), gopts...)
+    ctx, cancel := gocontext.WithTimeout(gocontext.Background(), 60*time.Second)
+    defer cancel()
+    conn, err := grpc.DialContext(ctx, dialer.DialAddress(address), gopts...)
     if err != nil {
         return nil, errors.Wrapf(err, "failed to dial %q", address)
     }
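This init function (and the ctr one in the next hunks) moves from grpclog.SetLogger, which grpc-go has deprecated, to the LoggerV2 API while keeping the same intent of silencing gRPC's own output. A minimal sketch of that call in isolation, outside containerd:

```go
package main

import (
	"io/ioutil"

	"google.golang.org/grpc/grpclog"
)

func init() {
	// Route gRPC's info, warning and error output to ioutil.Discard so the
	// library's own logging does not interleave with the program's stdio.
	grpclog.SetLoggerV2(grpclog.NewLoggerV2(ioutil.Discard, ioutil.Discard, ioutil.Discard))
}

func main() {}
```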
@@ -19,7 +19,6 @@ package app
 import (
     "fmt"
     "io/ioutil"
-    "log"
 
     "github.com/containerd/containerd/cmd/ctr/commands/containers"
     "github.com/containerd/containerd/cmd/ctr/commands/content"
@@ -44,7 +43,7 @@ var extraCmds = []cli.Command{}
 
 func init() {
     // Discard grpc logs so that they don't mess with our stdio
-    grpclog.SetLogger(log.New(ioutil.Discard, "", log.LstdFlags))
+    grpclog.SetLoggerV2(grpclog.NewLoggerV2(ioutil.Discard, ioutil.Discard, ioutil.Discard))
 
     cli.VersionPrinter = func(c *cli.Context) {
         fmt.Println(c.App.Name, version.Package, c.App.Version)
@@ -18,14 +18,14 @@ github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9
 github.com/matttproud/golang_protobuf_extensions v1.0.0
 github.com/gogo/protobuf v1.0.0
 github.com/gogo/googleapis 08a7655d27152912db7aaf4f983275eaf8d128ef
-github.com/golang/protobuf 1643683e1b54a9e88ad26d98f81400c8c9d9f4f9
+github.com/golang/protobuf v1.1.0
 github.com/opencontainers/runtime-spec v1.0.1
 github.com/opencontainers/runc 69663f0bd4b60df09991c08812a60108003fa340
 github.com/sirupsen/logrus v1.0.0
 github.com/pmezard/go-difflib v1.0.0
 github.com/urfave/cli 7bc6a0acffa589f415f88aca16cc1de5ffd66f9c
 golang.org/x/net b3756b4b77d7b13260a0a2ec658753cf48922eac
-google.golang.org/grpc v1.10.1
+google.golang.org/grpc v1.12.0
 github.com/pkg/errors v0.8.0
 github.com/opencontainers/go-digest c9281466c8b2f606084ac71339773efd177436e7
 golang.org/x/sys 314a259e304ff91bd6985da2a7149bbf91237993 https://github.com/golang/sys
65	vendor/github.com/golang/protobuf/README.md (generated, vendored)
@@ -7,7 +7,7 @@ Google's data interchange format.
 Copyright 2010 The Go Authors.
 https://github.com/golang/protobuf
 
-This package and the code it generates requires at least Go 1.4.
+This package and the code it generates requires at least Go 1.6.
 
 This software implements Go bindings for protocol buffers. For
 information about protocol buffers themselves, see
@@ -56,13 +56,49 @@ parameter set to the directory you want to output the Go code to.
 The generated files will be suffixed .pb.go. See the Test code below
 for an example using such a file.
 
+## Packages and input paths ##
+
+The protocol buffer language has a concept of "packages" which does not
+correspond well to the Go notion of packages. In generated Go code,
+each source `.proto` file is associated with a single Go package. The
+name and import path for this package is specified with the `go_package`
+proto option:
+
+    option go_package = "github.com/golang/protobuf/ptypes/any";
+
+The protocol buffer compiler will attempt to derive a package name and
+import path if a `go_package` option is not present, but it is
+best to always specify one explicitly.
+
+There is a one-to-one relationship between source `.proto` files and
+generated `.pb.go` files, but any number of `.pb.go` files may be
+contained in the same Go package.
+
+The output name of a generated file is produced by replacing the
+`.proto` suffix with `.pb.go` (e.g., `foo.proto` produces `foo.pb.go`).
+However, the output directory is selected in one of two ways. Let
+us say we have `inputs/x.proto` with a `go_package` option of
+`github.com/golang/protobuf/p`. The corresponding output file may
+be:
+
+- Relative to the import path:
+
+    protoc --go_out=. inputs/x.proto
+    # writes ./github.com/golang/protobuf/p/x.pb.go
+
+  (This can work well with `--go_out=$GOPATH`.)
+
+- Relative to the input file:
+
+    protoc --go_out=paths=source_relative:. inputs/x.proto
+    # generate ./inputs/x.pb.go
+
+## Generated code ##
+
 The package comment for the proto library contains text describing
 the interface provided in Go for protocol buffers. Here is an edited
 version.
 
-==========
 
 The proto package converts data structures to and from the
 wire format of protocol buffers. It works in concert with the
 Go source code generated for .proto files by the protocol compiler.
@@ -114,9 +150,9 @@ Consider file test.proto, containing
 ```proto
 syntax = "proto2";
 package example;
 
 enum FOO { X = 17; };
 
 message Test {
   required string label = 1;
   optional int32 type = 2 [default=77];
@@ -170,22 +206,25 @@ To create and play with a Test object from the example package,
 To pass extra parameters to the plugin, use a comma-separated
 parameter list separated from the output directory by a colon:
 
     protoc --go_out=plugins=grpc,import_path=mypackage:. *.proto
 
+- `paths=(import | source_relative)` - specifies how the paths of
+  generated files are structured. See the "Packages and imports paths"
+  section above. The default is `import`.
-- `import_prefix=xxx` - a prefix that is added onto the beginning of
-  all imports. Useful for things like generating protos in a
-  subdirectory, or regenerating vendored protobufs in-place.
-- `import_path=foo/bar` - used as the package if no input files
-  declare `go_package`. If it contains slashes, everything up to the
-  rightmost slash is ignored.
 - `plugins=plugin1+plugin2` - specifies the list of sub-plugins to
   load. The only plugin in this repo is `grpc`.
 - `Mfoo/bar.proto=quux/shme` - declares that foo/bar.proto is
   associated with Go package quux/shme. This is subject to the
   import_prefix parameter.
 
+The following parameters are deprecated and should not be used:
+
+- `import_prefix=xxx` - a prefix that is added onto the beginning of
+  all imports.
+- `import_path=foo/bar` - used as the package if no input files
+  declare `go_package`. If it contains slashes, everything up to the
+  rightmost slash is ignored.
 
 ## gRPC Support ##
 
 If a proto file specifies RPC services, protoc-gen-go can be instructed to
46	vendor/github.com/golang/protobuf/proto/clone.go (generated, vendored)
@@ -35,22 +35,39 @@
 package proto
 
 import (
+    "fmt"
     "log"
     "reflect"
     "strings"
 )
 
 // Clone returns a deep copy of a protocol buffer.
-func Clone(pb Message) Message {
-    in := reflect.ValueOf(pb)
+func Clone(src Message) Message {
+    in := reflect.ValueOf(src)
     if in.IsNil() {
-        return pb
+        return src
     }
-
     out := reflect.New(in.Type().Elem())
-    // out is empty so a merge is a deep copy.
-    mergeStruct(out.Elem(), in.Elem())
-    return out.Interface().(Message)
+    dst := out.Interface().(Message)
+    Merge(dst, src)
+    return dst
+}
+
+// Merger is the interface representing objects that can merge messages of the same type.
+type Merger interface {
+    // Merge merges src into this message.
+    // Required and optional fields that are set in src will be set to that value in dst.
+    // Elements of repeated fields will be appended.
+    //
+    // Merge may panic if called with a different argument type than the receiver.
+    Merge(src Message)
+}
+
+// generatedMerger is the custom merge method that generated protos will have.
+// We must add this method since a generate Merge method will conflict with
+// many existing protos that have a Merge data field already defined.
+type generatedMerger interface {
+    XXX_Merge(src Message)
 }
 
 // Merge merges src into dst.
@@ -58,17 +75,24 @@ func Clone(pb Message) Message {
 // Elements of repeated fields will be appended.
 // Merge panics if src and dst are not the same type, or if dst is nil.
 func Merge(dst, src Message) {
+    if m, ok := dst.(Merger); ok {
+        m.Merge(src)
+        return
+    }
+
     in := reflect.ValueOf(src)
     out := reflect.ValueOf(dst)
     if out.IsNil() {
         panic("proto: nil destination")
     }
     if in.Type() != out.Type() {
-        // Explicit test prior to mergeStruct so that mistyped nils will fail
-        panic("proto: type mismatch")
+        panic(fmt.Sprintf("proto.Merge(%T, %T) type mismatch", dst, src))
     }
     if in.IsNil() {
-        // Merging nil into non-nil is a quiet no-op
+        return // Merge from nil src is a noop
+    }
+    if m, ok := dst.(generatedMerger); ok {
+        m.XXX_Merge(src)
         return
     }
     mergeStruct(out.Elem(), in.Elem())
@@ -84,7 +108,7 @@ func mergeStruct(out, in reflect.Value) {
         mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i])
     }
 
-    if emIn, ok := extendable(in.Addr().Interface()); ok {
+    if emIn, err := extendable(in.Addr().Interface()); err == nil {
         emOut, _ := extendable(out.Addr().Interface())
         mIn, muIn := emIn.extensionsRead()
         if mIn != nil {
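In the new clone.go, Clone is defined in terms of Merge, and a message may implement the Merger or generatedMerger interfaces to take over merging. A small sketch of the exported behaviour; the wrappers well-known type is used only to have a concrete generated message and is not part of this commit:

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"github.com/golang/protobuf/ptypes/wrappers"
)

func main() {
	src := &wrappers.StringValue{Value: "hello"}

	// Clone now allocates a fresh message and Merges src into it,
	// so the copy is deep and independent of src.
	dst := proto.Clone(src).(*wrappers.StringValue)
	fmt.Println(dst.Value) // hello

	// Merge copies set fields from src into the destination; per the doc
	// comments above, repeated fields are appended rather than replaced.
	other := &wrappers.StringValue{}
	proto.Merge(other, src)
	fmt.Println(other.Value) // hello
}
```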
668	vendor/github.com/golang/protobuf/proto/decode.go (generated, vendored)
@@ -39,8 +39,6 @@ import (
     "errors"
     "fmt"
     "io"
-    "os"
-    "reflect"
 )
 
 // errOverflow is returned when an integer is too large to be represented.
@@ -50,10 +48,6 @@ var errOverflow = errors.New("proto: integer overflow")
 // wire type is encountered. It does not get returned to user code.
 var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
 
-// The fundamental decoders that interpret bytes on the wire.
-// Those that take integer types all return uint64 and are
-// therefore of type valueDecoder.
-
 // DecodeVarint reads a varint-encoded integer from the slice.
 // It returns the integer and the number of bytes consumed, or
 // zero if there is not enough.
@@ -267,9 +261,6 @@ func (p *Buffer) DecodeZigzag32() (x uint64, err error) {
     return
 }
 
-// These are not ValueDecoders: they produce an array of bytes or a string.
-// bytes, embedded messages
-
 // DecodeRawBytes reads a count-delimited byte buffer from the Buffer.
 // This is the format used for the bytes protocol buffer
 // type and for embedded messages.
@@ -311,81 +302,29 @@ func (p *Buffer) DecodeStringBytes() (s string, err error) {
     return string(buf), nil
 }
 
-// Skip the next item in the buffer. Its wire type is decoded and presented as an argument.
-// If the protocol buffer has extensions, and the field matches, add it as an extension.
-// Otherwise, if the XXX_unrecognized field exists, append the skipped data there.
-func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error {
-    oi := o.index
-
-    err := o.skip(t, tag, wire)
-    if err != nil {
-        return err
-    }
-
-    if !unrecField.IsValid() {
-        return nil
-    }
-
-    ptr := structPointer_Bytes(base, unrecField)
-
-    // Add the skipped field to struct field
-    obuf := o.buf
-
-    o.buf = *ptr
-    o.EncodeVarint(uint64(tag<<3 | wire))
-    *ptr = append(o.buf, obuf[oi:o.index]...)
-
-    o.buf = obuf
-
-    return nil
-}
-
-// Skip the next item in the buffer. Its wire type is decoded and presented as an argument.
-func (o *Buffer) skip(t reflect.Type, tag, wire int) error {
-
-    var u uint64
-    var err error
-
-    switch wire {
-    case WireVarint:
-        _, err = o.DecodeVarint()
-    case WireFixed64:
-        _, err = o.DecodeFixed64()
-    case WireBytes:
-        _, err = o.DecodeRawBytes(false)
-    case WireFixed32:
-        _, err = o.DecodeFixed32()
-    case WireStartGroup:
-        for {
-            u, err = o.DecodeVarint()
-            if err != nil {
-                break
-            }
-            fwire := int(u & 0x7)
-            if fwire == WireEndGroup {
-                break
-            }
-            ftag := int(u >> 3)
-            err = o.skip(t, ftag, fwire)
-            if err != nil {
-                break
-            }
-        }
-    default:
-        err = fmt.Errorf("proto: can't skip unknown wire type %d for %s", wire, t)
-    }
-    return err
-}
-
 // Unmarshaler is the interface representing objects that can
-// unmarshal themselves. The method should reset the receiver before
-// decoding starts. The argument points to data that may be
+// unmarshal themselves. The argument points to data that may be
 // overwritten, so implementations should not keep references to the
 // buffer.
+// Unmarshal implementations should not clear the receiver.
+// Any unmarshaled data should be merged into the receiver.
+// Callers of Unmarshal that do not want to retain existing data
+// should Reset the receiver before calling Unmarshal.
 type Unmarshaler interface {
     Unmarshal([]byte) error
 }
 
+// newUnmarshaler is the interface representing objects that can
+// unmarshal themselves. The semantics are identical to Unmarshaler.
+//
+// This exists to support protoc-gen-go generated messages.
+// The proto package will stop type-asserting to this interface in the future.
+//
+// DO NOT DEPEND ON THIS.
+type newUnmarshaler interface {
+    XXX_Unmarshal([]byte) error
+}
+
 // Unmarshal parses the protocol buffer representation in buf and places the
 // decoded result in pb. If the struct underlying pb does not match
 // the data in buf, the results can be unpredictable.
@@ -395,7 +334,13 @@ type Unmarshaler interface {
 // to preserve and append to existing data.
 func Unmarshal(buf []byte, pb Message) error {
     pb.Reset()
-    return UnmarshalMerge(buf, pb)
+    if u, ok := pb.(newUnmarshaler); ok {
+        return u.XXX_Unmarshal(buf)
+    }
+    if u, ok := pb.(Unmarshaler); ok {
+        return u.Unmarshal(buf)
+    }
+    return NewBuffer(buf).Unmarshal(pb)
 }
 
 // UnmarshalMerge parses the protocol buffer representation in buf and
@@ -405,8 +350,16 @@ func Unmarshal(buf []byte, pb Message) error {
 // UnmarshalMerge merges into existing data in pb.
 // Most code should use Unmarshal instead.
 func UnmarshalMerge(buf []byte, pb Message) error {
-    // If the object can unmarshal itself, let it.
+    if u, ok := pb.(newUnmarshaler); ok {
+        return u.XXX_Unmarshal(buf)
+    }
     if u, ok := pb.(Unmarshaler); ok {
+        // NOTE: The history of proto have unfortunately been inconsistent
+        // whether Unmarshaler should or should not implicitly clear itself.
+        // Some implementations do, most do not.
+        // Thus, calling this here may or may not do what people want.
+        //
+        // See https://github.com/golang/protobuf/issues/424
         return u.Unmarshal(buf)
     }
     return NewBuffer(buf).Unmarshal(pb)
@@ -422,12 +375,17 @@ func (p *Buffer) DecodeMessage(pb Message) error {
 }
 
 // DecodeGroup reads a tag-delimited group from the Buffer.
+// StartGroup tag is already consumed. This function consumes
+// EndGroup tag.
 func (p *Buffer) DecodeGroup(pb Message) error {
-    typ, base, err := getbase(pb)
-    if err != nil {
-        return err
+    b := p.buf[p.index:]
+    x, y := findEndGroup(b)
+    if x < 0 {
+        return io.ErrUnexpectedEOF
     }
-    return p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), true, base)
+    err := Unmarshal(b[:x], pb)
+    p.index += y
+    return err
 }
 
 // Unmarshal parses the protocol buffer representation in the
@@ -438,533 +396,33 @@ func (p *Buffer) DecodeGroup(pb Message) error {
 // Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal.
 func (p *Buffer) Unmarshal(pb Message) error {
     // If the object can unmarshal itself, let it.
+    if u, ok := pb.(newUnmarshaler); ok {
+        err := u.XXX_Unmarshal(p.buf[p.index:])
+        p.index = len(p.buf)
+        return err
+    }
     if u, ok := pb.(Unmarshaler); ok {
+        // NOTE: The history of proto have unfortunately been inconsistent
+        // whether Unmarshaler should or should not implicitly clear itself.
+        // Some implementations do, most do not.
+        // Thus, calling this here may or may not do what people want.
+        //
+        // See https://github.com/golang/protobuf/issues/424
         err := u.Unmarshal(p.buf[p.index:])
         p.index = len(p.buf)
         return err
     }
 
-    typ, base, err := getbase(pb)
-    if err != nil {
-        return err
-    }
-
-    err = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base)
-
-    if collectStats {
-        stats.Decode++
-    }
-
-    return err
-}
+    // Slow workaround for messages that aren't Unmarshalers.
+    // This includes some hand-coded .pb.go files and
+    // bootstrap protos.
+    // TODO: fix all of those and then add Unmarshal to
+    // the Message interface. Then:
+    // The cast above and code below can be deleted.
+    // The old unmarshaler can be deleted.
+    // Clients can call Unmarshal directly (can already do that, actually).
+    var info InternalMessageInfo
+    err := info.Unmarshal(pb, p.buf[p.index:])
+    p.index = len(p.buf)
+    return err
+}
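The reworked path above prefers the generated XXX_Unmarshal method when present and keeps the documented split: Unmarshal resets the target first, while UnmarshalMerge decodes into whatever is already there. A short sketch of that contract, again borrowing a well-known type rather than containerd's own protos:

```go
package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/proto"
	"github.com/golang/protobuf/ptypes/wrappers"
)

func main() {
	buf, err := proto.Marshal(&wrappers.StringValue{Value: "from-wire"})
	if err != nil {
		log.Fatal(err)
	}

	// Unmarshal resets the receiver before decoding.
	msg := &wrappers.StringValue{Value: "pre-existing"}
	if err := proto.Unmarshal(buf, msg); err != nil {
		log.Fatal(err)
	}
	fmt.Println(msg.Value) // from-wire

	// UnmarshalMerge decodes without resetting first; a singular scalar like
	// this is still overwritten by the incoming value, but repeated fields
	// and unset fields would be merged rather than cleared.
	merged := &wrappers.StringValue{Value: "pre-existing"}
	if err := proto.UnmarshalMerge(buf, merged); err != nil {
		log.Fatal(err)
	}
	fmt.Println(merged.Value) // from-wire
}
```

The remaining lines of this hunk delete the old reflection-based unmarshalType machinery and its per-type dec_* helpers, which the table-driven InternalMessageInfo path replaces.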
|
|
||||||
// unmarshalType does the work of unmarshaling a structure.
|
|
||||||
func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error {
|
|
||||||
var state errorState
|
|
||||||
required, reqFields := prop.reqCount, uint64(0)
|
|
||||||
|
|
||||||
var err error
|
|
||||||
for err == nil && o.index < len(o.buf) {
|
|
||||||
oi := o.index
|
|
||||||
var u uint64
|
|
||||||
u, err = o.DecodeVarint()
|
|
||||||
if err != nil {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
wire := int(u & 0x7)
|
|
||||||
if wire == WireEndGroup {
|
|
||||||
if is_group {
|
|
||||||
if required > 0 {
|
|
||||||
// Not enough information to determine the exact field.
|
|
||||||
// (See below.)
|
|
||||||
return &RequiredNotSetError{"{Unknown}"}
|
|
||||||
}
|
|
||||||
return nil // input is satisfied
|
|
||||||
}
|
|
||||||
return fmt.Errorf("proto: %s: wiretype end group for non-group", st)
|
|
||||||
}
|
|
||||||
tag := int(u >> 3)
|
|
||||||
if tag <= 0 {
|
|
||||||
return fmt.Errorf("proto: %s: illegal tag %d (wire type %d)", st, tag, wire)
|
|
||||||
}
|
|
||||||
fieldnum, ok := prop.decoderTags.get(tag)
|
|
||||||
if !ok {
|
|
||||||
// Maybe it's an extension?
|
|
||||||
if prop.extendable {
|
|
||||||
if e, _ := extendable(structPointer_Interface(base, st)); isExtensionField(e, int32(tag)) {
|
|
||||||
if err = o.skip(st, tag, wire); err == nil {
|
|
||||||
extmap := e.extensionsWrite()
|
|
||||||
ext := extmap[int32(tag)] // may be missing
|
|
||||||
ext.enc = append(ext.enc, o.buf[oi:o.index]...)
|
|
||||||
extmap[int32(tag)] = ext
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// Maybe it's a oneof?
|
|
||||||
if prop.oneofUnmarshaler != nil {
|
|
||||||
m := structPointer_Interface(base, st).(Message)
|
|
||||||
// First return value indicates whether tag is a oneof field.
|
|
||||||
ok, err = prop.oneofUnmarshaler(m, tag, wire, o)
|
|
||||||
if err == ErrInternalBadWireType {
|
|
||||||
// Map the error to something more descriptive.
|
|
||||||
// Do the formatting here to save generated code space.
|
|
||||||
err = fmt.Errorf("bad wiretype for oneof field in %T", m)
|
|
||||||
}
|
|
||||||
if ok {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
}
|
|
||||||
err = o.skipAndSave(st, tag, wire, base, prop.unrecField)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
p := prop.Prop[fieldnum]
|
|
||||||
|
|
||||||
if p.dec == nil {
|
|
||||||
fmt.Fprintf(os.Stderr, "proto: no protobuf decoder for %s.%s\n", st, st.Field(fieldnum).Name)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
dec := p.dec
|
|
||||||
if wire != WireStartGroup && wire != p.WireType {
|
|
||||||
if wire == WireBytes && p.packedDec != nil {
|
|
||||||
// a packable field
|
|
||||||
dec = p.packedDec
|
|
||||||
} else {
|
|
||||||
err = fmt.Errorf("proto: bad wiretype for field %s.%s: got wiretype %d, want %d", st, st.Field(fieldnum).Name, wire, p.WireType)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
}
|
|
||||||
decErr := dec(o, p, base)
|
|
||||||
if decErr != nil && !state.shouldContinue(decErr, p) {
|
|
||||||
err = decErr
|
|
||||||
}
|
|
||||||
if err == nil && p.Required {
|
|
||||||
// Successfully decoded a required field.
|
|
||||||
if tag <= 64 {
|
|
||||||
// use bitmap for fields 1-64 to catch field reuse.
|
|
||||||
var mask uint64 = 1 << uint64(tag-1)
|
|
||||||
if reqFields&mask == 0 {
|
|
||||||
// new required field
|
|
||||||
reqFields |= mask
|
|
||||||
required--
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// This is imprecise. It can be fooled by a required field
|
|
||||||
// with a tag > 64 that is encoded twice; that's very rare.
|
|
||||||
// A fully correct implementation would require allocating
|
|
||||||
// a data structure, which we would like to avoid.
|
|
||||||
required--
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if err == nil {
|
|
||||||
if is_group {
|
|
||||||
return io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
if state.err != nil {
|
|
||||||
return state.err
|
|
||||||
}
|
|
||||||
if required > 0 {
|
|
||||||
// Not enough information to determine the exact field. If we use extra
|
|
||||||
// CPU, we could determine the field only if the missing required field
|
|
||||||
// has a tag <= 64 and we check reqFields.
|
|
||||||
return &RequiredNotSetError{"{Unknown}"}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Individual type decoders
|
|
||||||
// For each,
|
|
||||||
// u is the decoded value,
|
|
||||||
// v is a pointer to the field (pointer) in the struct
|
|
||||||
|
|
||||||
// Sizes of the pools to allocate inside the Buffer.
|
|
||||||
// The goal is modest amortization and allocation
|
|
||||||
// on at least 16-byte boundaries.
|
|
||||||
const (
|
|
||||||
boolPoolSize = 16
|
|
||||||
uint32PoolSize = 8
|
|
||||||
uint64PoolSize = 4
|
|
||||||
)
|
|
||||||
|
|
||||||
// Decode a bool.
|
|
||||||
func (o *Buffer) dec_bool(p *Properties, base structPointer) error {
|
|
||||||
u, err := p.valDec(o)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if len(o.bools) == 0 {
|
|
||||||
o.bools = make([]bool, boolPoolSize)
|
|
||||||
}
|
|
||||||
o.bools[0] = u != 0
|
|
||||||
*structPointer_Bool(base, p.field) = &o.bools[0]
|
|
||||||
o.bools = o.bools[1:]
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error {
|
|
||||||
u, err := p.valDec(o)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
*structPointer_BoolVal(base, p.field) = u != 0
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Decode an int32.
|
|
||||||
func (o *Buffer) dec_int32(p *Properties, base structPointer) error {
|
|
||||||
u, err := p.valDec(o)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
word32_Set(structPointer_Word32(base, p.field), o, uint32(u))
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error {
|
|
||||||
u, err := p.valDec(o)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
word32Val_Set(structPointer_Word32Val(base, p.field), uint32(u))
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Decode an int64.
|
|
||||||
func (o *Buffer) dec_int64(p *Properties, base structPointer) error {
|
|
||||||
u, err := p.valDec(o)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
word64_Set(structPointer_Word64(base, p.field), o, u)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error {
|
|
||||||
u, err := p.valDec(o)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
word64Val_Set(structPointer_Word64Val(base, p.field), o, u)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Decode a string.
|
|
||||||
func (o *Buffer) dec_string(p *Properties, base structPointer) error {
|
|
||||||
s, err := o.DecodeStringBytes()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
*structPointer_String(base, p.field) = &s
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error {
|
|
||||||
s, err := o.DecodeStringBytes()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
*structPointer_StringVal(base, p.field) = s
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Decode a slice of bytes ([]byte).
|
|
||||||
func (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error {
|
|
||||||
b, err := o.DecodeRawBytes(true)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
*structPointer_Bytes(base, p.field) = b
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Decode a slice of bools ([]bool).
|
|
||||||
func (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error {
|
|
||||||
u, err := p.valDec(o)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
v := structPointer_BoolSlice(base, p.field)
|
|
||||||
*v = append(*v, u != 0)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Decode a slice of bools ([]bool) in packed format.
|
|
||||||
func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error {
|
|
||||||
v := structPointer_BoolSlice(base, p.field)
|
|
||||||
|
|
||||||
nn, err := o.DecodeVarint()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
nb := int(nn) // number of bytes of encoded bools
|
|
||||||
fin := o.index + nb
|
|
||||||
if fin < o.index {
|
|
||||||
return errOverflow
|
|
||||||
}
|
|
||||||
|
|
||||||
y := *v
|
|
||||||
for o.index < fin {
|
|
||||||
u, err := p.valDec(o)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
y = append(y, u != 0)
|
|
||||||
}
|
|
||||||
|
|
||||||
*v = y
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Decode a slice of int32s ([]int32).
|
|
||||||
func (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error {
|
|
||||||
u, err := p.valDec(o)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
structPointer_Word32Slice(base, p.field).Append(uint32(u))
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Decode a slice of int32s ([]int32) in packed format.
|
|
||||||
func (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error {
|
|
||||||
v := structPointer_Word32Slice(base, p.field)
|
|
||||||
|
|
||||||
nn, err := o.DecodeVarint()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
nb := int(nn) // number of bytes of encoded int32s
|
|
||||||
|
|
||||||
fin := o.index + nb
|
|
||||||
if fin < o.index {
|
|
||||||
return errOverflow
|
|
||||||
}
|
|
||||||
for o.index < fin {
|
|
||||||
u, err := p.valDec(o)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
v.Append(uint32(u))
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Decode a slice of int64s ([]int64).
|
|
||||||
func (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error {
|
|
||||||
u, err := p.valDec(o)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
structPointer_Word64Slice(base, p.field).Append(u)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Decode a slice of int64s ([]int64) in packed format.
|
|
||||||
func (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error {
|
|
||||||
v := structPointer_Word64Slice(base, p.field)
|
|
||||||
|
|
||||||
nn, err := o.DecodeVarint()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
nb := int(nn) // number of bytes of encoded int64s
|
|
||||||
|
|
||||||
fin := o.index + nb
|
|
||||||
if fin < o.index {
|
|
||||||
return errOverflow
|
|
||||||
}
|
|
||||||
for o.index < fin {
|
|
||||||
u, err := p.valDec(o)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
v.Append(u)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Decode a slice of strings ([]string).
|
|
||||||
func (o *Buffer) dec_slice_string(p *Properties, base structPointer) error {
|
|
||||||
s, err := o.DecodeStringBytes()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
v := structPointer_StringSlice(base, p.field)
|
|
||||||
*v = append(*v, s)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Decode a slice of slice of bytes ([][]byte).
|
|
||||||
func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error {
|
|
||||||
b, err := o.DecodeRawBytes(true)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
v := structPointer_BytesSlice(base, p.field)
|
|
||||||
*v = append(*v, b)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Decode a map field.
|
|
||||||
func (o *Buffer) dec_new_map(p *Properties, base structPointer) error {
|
|
||||||
raw, err := o.DecodeRawBytes(false)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
oi := o.index // index at the end of this map entry
|
|
||||||
o.index -= len(raw) // move buffer back to start of map entry
|
|
||||||
|
|
||||||
mptr := structPointer_NewAt(base, p.field, p.mtype) // *map[K]V
|
|
||||||
if mptr.Elem().IsNil() {
|
|
||||||
mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem()))
|
|
||||||
}
|
|
||||||
v := mptr.Elem() // map[K]V
|
|
||||||
|
|
||||||
// Prepare addressable doubly-indirect placeholders for the key and value types.
|
|
||||||
// See enc_new_map for why.
|
|
||||||
keyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K
|
|
||||||
keybase := toStructPointer(keyptr.Addr()) // **K
|
|
||||||
|
|
||||||
var valbase structPointer
|
|
||||||
var valptr reflect.Value
|
|
||||||
switch p.mtype.Elem().Kind() {
|
|
||||||
case reflect.Slice:
|
|
||||||
// []byte
|
|
||||||
var dummy []byte
|
|
||||||
valptr = reflect.ValueOf(&dummy) // *[]byte
|
|
||||||
valbase = toStructPointer(valptr) // *[]byte
|
|
||||||
case reflect.Ptr:
|
|
||||||
// message; valptr is **Msg; need to allocate the intermediate pointer
|
|
||||||
valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V
|
|
||||||
valptr.Set(reflect.New(valptr.Type().Elem()))
|
|
||||||
valbase = toStructPointer(valptr)
|
|
||||||
default:
|
|
||||||
// everything else
|
|
||||||
valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V
|
|
||||||
valbase = toStructPointer(valptr.Addr()) // **V
|
|
||||||
}
|
|
||||||
|
|
||||||
// Decode.
|
|
||||||
// This parses a restricted wire format, namely the encoding of a message
|
|
||||||
// with two fields. See enc_new_map for the format.
|
|
||||||
for o.index < oi {
|
|
||||||
// tagcode for key and value properties are always a single byte
|
|
||||||
// because they have tags 1 and 2.
|
|
||||||
tagcode := o.buf[o.index]
|
|
||||||
o.index++
|
|
||||||
switch tagcode {
|
|
||||||
case p.mkeyprop.tagcode[0]:
|
|
||||||
if err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
case p.mvalprop.tagcode[0]:
|
|
||||||
if err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
// TODO: Should we silently skip this instead?
|
|
||||||
return fmt.Errorf("proto: bad map data tag %d", raw[0])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
keyelem, valelem := keyptr.Elem(), valptr.Elem()
|
|
||||||
if !keyelem.IsValid() {
|
|
||||||
keyelem = reflect.Zero(p.mtype.Key())
|
|
||||||
}
|
|
||||||
if !valelem.IsValid() {
|
|
||||||
valelem = reflect.Zero(p.mtype.Elem())
|
|
||||||
}
|
|
||||||
|
|
||||||
v.SetMapIndex(keyelem, valelem)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Decode a group.
|
|
||||||
func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error {
|
|
||||||
bas := structPointer_GetStructPointer(base, p.field)
|
|
||||||
if structPointer_IsNil(bas) {
|
|
||||||
// allocate new nested message
|
|
||||||
bas = toStructPointer(reflect.New(p.stype))
|
|
||||||
structPointer_SetStructPointer(base, p.field, bas)
|
|
||||||
}
|
|
||||||
return o.unmarshalType(p.stype, p.sprop, true, bas)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Decode an embedded message.
|
|
||||||
func (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) {
|
|
||||||
raw, e := o.DecodeRawBytes(false)
|
|
||||||
if e != nil {
|
|
||||||
return e
|
|
||||||
}
|
|
||||||
|
|
||||||
bas := structPointer_GetStructPointer(base, p.field)
|
|
||||||
if structPointer_IsNil(bas) {
|
|
||||||
// allocate new nested message
|
|
||||||
bas = toStructPointer(reflect.New(p.stype))
|
|
||||||
structPointer_SetStructPointer(base, p.field, bas)
|
|
||||||
}
|
|
||||||
|
|
||||||
// If the object can unmarshal itself, let it.
|
|
||||||
if p.isUnmarshaler {
|
|
||||||
iv := structPointer_Interface(bas, p.stype)
|
|
||||||
return iv.(Unmarshaler).Unmarshal(raw)
|
|
||||||
}
|
|
||||||
|
|
||||||
obuf := o.buf
|
|
||||||
oi := o.index
|
|
||||||
o.buf = raw
|
|
||||||
o.index = 0
|
|
||||||
|
|
||||||
err = o.unmarshalType(p.stype, p.sprop, false, bas)
|
|
||||||
o.buf = obuf
|
|
||||||
o.index = oi
|
|
||||||
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Decode a slice of embedded messages.
|
|
||||||
func (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error {
|
|
||||||
return o.dec_slice_struct(p, false, base)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Decode a slice of embedded groups.
|
|
||||||
func (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error {
|
|
||||||
return o.dec_slice_struct(p, true, base)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Decode a slice of structs ([]*struct).
|
|
||||||
func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error {
|
|
||||||
v := reflect.New(p.stype)
|
|
||||||
bas := toStructPointer(v)
|
|
||||||
structPointer_StructPointerSlice(base, p.field).Append(bas)
|
|
||||||
|
|
||||||
if is_group {
|
|
||||||
err := o.unmarshalType(p.stype, p.sprop, is_group, bas)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
raw, err := o.DecodeRawBytes(false)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// If the object can unmarshal itself, let it.
|
|
||||||
if p.isUnmarshaler {
|
|
||||||
iv := v.Interface()
|
|
||||||
return iv.(Unmarshaler).Unmarshal(raw)
|
|
||||||
}
|
|
||||||
|
|
||||||
obuf := o.buf
|
|
||||||
oi := o.index
|
|
||||||
o.buf = raw
|
|
||||||
o.index = 0
|
|
||||||
|
|
||||||
err = o.unmarshalType(p.stype, p.sprop, is_group, bas)
|
|
||||||
|
|
||||||
o.buf = obuf
|
|
||||||
o.index = oi
|
|
||||||
|
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
350	vendor/github.com/golang/protobuf/proto/discard.go (generated, vendored, new file)
@@ -0,0 +1,350 @@
|
// Go support for Protocol Buffers - Google's data interchange format
|
||||||
|
//
|
||||||
|
// Copyright 2017 The Go Authors. All rights reserved.
|
||||||
|
// https://github.com/golang/protobuf
|
||||||
|
//
|
||||||
|
// Redistribution and use in source and binary forms, with or without
|
||||||
|
// modification, are permitted provided that the following conditions are
|
||||||
|
// met:
|
||||||
|
//
|
||||||
|
// * Redistributions of source code must retain the above copyright
|
||||||
|
// notice, this list of conditions and the following disclaimer.
|
||||||
|
// * Redistributions in binary form must reproduce the above
|
||||||
|
// copyright notice, this list of conditions and the following disclaimer
|
||||||
|
// in the documentation and/or other materials provided with the
|
||||||
|
// distribution.
|
||||||
|
// * Neither the name of Google Inc. nor the names of its
|
||||||
|
// contributors may be used to endorse or promote products derived from
|
||||||
|
// this software without specific prior written permission.
|
||||||
|
//
|
||||||
|
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
|
||||||
|
package proto
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"reflect"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"sync/atomic"
|
||||||
|
)
|
||||||
|
|
||||||
|
type generatedDiscarder interface {
|
||||||
|
XXX_DiscardUnknown()
|
||||||
|
}
|
||||||
|
|
||||||
|
// DiscardUnknown recursively discards all unknown fields from this message
|
||||||
|
// and all embedded messages.
|
||||||
|
//
|
||||||
|
// When unmarshaling a message with unrecognized fields, the tags and values
|
||||||
|
// of such fields are preserved in the Message. This allows a later call to
|
||||||
|
// marshal to be able to produce a message that continues to have those
|
||||||
|
// unrecognized fields. To avoid this, DiscardUnknown is used to
|
||||||
|
// explicitly clear the unknown fields after unmarshaling.
|
||||||
|
//
|
||||||
|
// For proto2 messages, the unknown fields of message extensions are only
|
||||||
|
// discarded from messages that have been accessed via GetExtension.
|
||||||
|
func DiscardUnknown(m Message) {
|
||||||
|
if m, ok := m.(generatedDiscarder); ok {
|
||||||
|
m.XXX_DiscardUnknown()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
// TODO: Dynamically populate a InternalMessageInfo for legacy messages,
|
||||||
|
// but the master branch has no implementation for InternalMessageInfo,
|
||||||
|
// so it would be more work to replicate that approach.
|
||||||
|
discardLegacy(m)
|
||||||
|
}
|
||||||
|
|
||||||
|
// DiscardUnknown recursively discards all unknown fields.
|
||||||
|
func (a *InternalMessageInfo) DiscardUnknown(m Message) {
|
||||||
|
di := atomicLoadDiscardInfo(&a.discard)
|
||||||
|
if di == nil {
|
||||||
|
di = getDiscardInfo(reflect.TypeOf(m).Elem())
|
||||||
|
atomicStoreDiscardInfo(&a.discard, di)
|
||||||
|
}
|
||||||
|
di.discard(toPointer(&m))
|
||||||
|
}
|
||||||
|
|
||||||
|
type discardInfo struct {
|
||||||
|
typ reflect.Type
|
||||||
|
|
||||||
|
initialized int32 // 0: only typ is valid, 1: everything is valid
|
||||||
|
lock sync.Mutex
|
||||||
|
|
||||||
|
fields []discardFieldInfo
|
||||||
|
unrecognized field
|
||||||
|
}
|
||||||
|
|
||||||
|
type discardFieldInfo struct {
|
||||||
|
field field // Offset of field, guaranteed to be valid
|
||||||
|
discard func(src pointer)
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
discardInfoMap = map[reflect.Type]*discardInfo{}
|
||||||
|
discardInfoLock sync.Mutex
|
||||||
|
)
|
||||||
|
|
||||||
|
func getDiscardInfo(t reflect.Type) *discardInfo {
|
||||||
|
discardInfoLock.Lock()
|
||||||
|
defer discardInfoLock.Unlock()
|
||||||
|
di := discardInfoMap[t]
|
||||||
|
if di == nil {
|
||||||
|
di = &discardInfo{typ: t}
|
||||||
|
discardInfoMap[t] = di
|
||||||
|
}
|
||||||
|
return di
|
||||||
|
}
|
||||||
|
|
||||||
|
func (di *discardInfo) discard(src pointer) {
|
||||||
|
if src.isNil() {
|
||||||
|
return // Nothing to do.
|
||||||
|
}
|
||||||
|
|
||||||
|
if atomic.LoadInt32(&di.initialized) == 0 {
|
||||||
|
di.computeDiscardInfo()
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, fi := range di.fields {
|
||||||
|
sfp := src.offset(fi.field)
|
||||||
|
fi.discard(sfp)
|
||||||
|
}
|
||||||
|
|
||||||
|
// For proto2 messages, only discard unknown fields in message extensions
|
||||||
|
// that have been accessed via GetExtension.
|
||||||
|
if em, err := extendable(src.asPointerTo(di.typ).Interface()); err == nil {
|
||||||
|
// Ignore lock since DiscardUnknown is not concurrency safe.
|
||||||
|
emm, _ := em.extensionsRead()
|
||||||
|
for _, mx := range emm {
|
||||||
|
if m, ok := mx.value.(Message); ok {
|
||||||
|
DiscardUnknown(m)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if di.unrecognized.IsValid() {
|
||||||
|
*src.offset(di.unrecognized).toBytes() = nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (di *discardInfo) computeDiscardInfo() {
|
||||||
|
di.lock.Lock()
|
||||||
|
defer di.lock.Unlock()
|
||||||
|
if di.initialized != 0 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
t := di.typ
|
||||||
|
n := t.NumField()
|
||||||
|
|
||||||
|
for i := 0; i < n; i++ {
|
||||||
|
f := t.Field(i)
|
||||||
|
if strings.HasPrefix(f.Name, "XXX_") {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
        dfi := discardFieldInfo{field: toField(&f)}
        tf := f.Type

        // Unwrap tf to get its most basic type.
        var isPointer, isSlice bool
        if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
            isSlice = true
            tf = tf.Elem()
        }
        if tf.Kind() == reflect.Ptr {
            isPointer = true
            tf = tf.Elem()
        }
        if isPointer && isSlice && tf.Kind() != reflect.Struct {
            panic(fmt.Sprintf("%v.%s cannot be a slice of pointers to primitive types", t, f.Name))
        }

        switch tf.Kind() {
        case reflect.Struct:
            switch {
            case !isPointer:
                panic(fmt.Sprintf("%v.%s cannot be a direct struct value", t, f.Name))
            case isSlice: // E.g., []*pb.T
                di := getDiscardInfo(tf)
                dfi.discard = func(src pointer) {
                    sps := src.getPointerSlice()
                    for _, sp := range sps {
                        if !sp.isNil() {
                            di.discard(sp)
                        }
                    }
                }
            default: // E.g., *pb.T
                di := getDiscardInfo(tf)
                dfi.discard = func(src pointer) {
                    sp := src.getPointer()
                    if !sp.isNil() {
                        di.discard(sp)
                    }
                }
            }
        case reflect.Map:
            switch {
            case isPointer || isSlice:
                panic(fmt.Sprintf("%v.%s cannot be a pointer to a map or a slice of map values", t, f.Name))
            default: // E.g., map[K]V
                if tf.Elem().Kind() == reflect.Ptr { // Proto struct (e.g., *T)
                    dfi.discard = func(src pointer) {
                        sm := src.asPointerTo(tf).Elem()
                        if sm.Len() == 0 {
                            return
                        }
                        for _, key := range sm.MapKeys() {
                            val := sm.MapIndex(key)
                            DiscardUnknown(val.Interface().(Message))
                        }
                    }
                } else {
                    dfi.discard = func(pointer) {} // Noop
                }
            }
        case reflect.Interface:
            // Must be oneof field.
            switch {
            case isPointer || isSlice:
                panic(fmt.Sprintf("%v.%s cannot be a pointer to a interface or a slice of interface values", t, f.Name))
            default: // E.g., interface{}
                // TODO: Make this faster?
                dfi.discard = func(src pointer) {
                    su := src.asPointerTo(tf).Elem()
                    if !su.IsNil() {
                        sv := su.Elem().Elem().Field(0)
                        if sv.Kind() == reflect.Ptr && sv.IsNil() {
                            return
                        }
                        switch sv.Type().Kind() {
                        case reflect.Ptr: // Proto struct (e.g., *T)
                            DiscardUnknown(sv.Interface().(Message))
                        }
                    }
                }
            }
        default:
            continue
        }
        di.fields = append(di.fields, dfi)
    }

    di.unrecognized = invalidField
    if f, ok := t.FieldByName("XXX_unrecognized"); ok {
        if f.Type != reflect.TypeOf([]byte{}) {
            panic("expected XXX_unrecognized to be of type []byte")
        }
        di.unrecognized = toField(&f)
    }

    atomic.StoreInt32(&di.initialized, 1)
}

func discardLegacy(m Message) {
    v := reflect.ValueOf(m)
    if v.Kind() != reflect.Ptr || v.IsNil() {
        return
    }
    v = v.Elem()
    if v.Kind() != reflect.Struct {
        return
    }
    t := v.Type()

    for i := 0; i < v.NumField(); i++ {
        f := t.Field(i)
        if strings.HasPrefix(f.Name, "XXX_") {
            continue
        }
        vf := v.Field(i)
        tf := f.Type

        // Unwrap tf to get its most basic type.
        var isPointer, isSlice bool
        if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
            isSlice = true
            tf = tf.Elem()
        }
        if tf.Kind() == reflect.Ptr {
            isPointer = true
            tf = tf.Elem()
        }
        if isPointer && isSlice && tf.Kind() != reflect.Struct {
            panic(fmt.Sprintf("%T.%s cannot be a slice of pointers to primitive types", m, f.Name))
        }

        switch tf.Kind() {
        case reflect.Struct:
            switch {
            case !isPointer:
                panic(fmt.Sprintf("%T.%s cannot be a direct struct value", m, f.Name))
            case isSlice: // E.g., []*pb.T
                for j := 0; j < vf.Len(); j++ {
                    discardLegacy(vf.Index(j).Interface().(Message))
                }
            default: // E.g., *pb.T
                discardLegacy(vf.Interface().(Message))
            }
        case reflect.Map:
            switch {
            case isPointer || isSlice:
                panic(fmt.Sprintf("%T.%s cannot be a pointer to a map or a slice of map values", m, f.Name))
            default: // E.g., map[K]V
                tv := vf.Type().Elem()
                if tv.Kind() == reflect.Ptr && tv.Implements(protoMessageType) { // Proto struct (e.g., *T)
                    for _, key := range vf.MapKeys() {
                        val := vf.MapIndex(key)
                        discardLegacy(val.Interface().(Message))
                    }
                }
            }
        case reflect.Interface:
            // Must be oneof field.
            switch {
            case isPointer || isSlice:
                panic(fmt.Sprintf("%T.%s cannot be a pointer to a interface or a slice of interface values", m, f.Name))
            default: // E.g., test_proto.isCommunique_Union interface
                if !vf.IsNil() && f.Tag.Get("protobuf_oneof") != "" {
                    vf = vf.Elem() // E.g., *test_proto.Communique_Msg
                    if !vf.IsNil() {
                        vf = vf.Elem()   // E.g., test_proto.Communique_Msg
                        vf = vf.Field(0) // E.g., Proto struct (e.g., *T) or primitive value
                        if vf.Kind() == reflect.Ptr {
                            discardLegacy(vf.Interface().(Message))
                        }
                    }
                }
            }
        }
    }

    if vf := v.FieldByName("XXX_unrecognized"); vf.IsValid() {
        if vf.Type() != reflect.TypeOf([]byte{}) {
            panic("expected XXX_unrecognized to be of type []byte")
        }
        vf.Set(reflect.ValueOf([]byte(nil)))
    }

    // For proto2 messages, only discard unknown fields in message extensions
    // that have been accessed via GetExtension.
    if em, err := extendable(m); err == nil {
        // Ignore lock since discardLegacy is not concurrency safe.
        emm, _ := em.extensionsRead()
        for _, mx := range emm {
            if m, ok := mx.value.(Message); ok {
                discardLegacy(m)
            }
        }
    }
}
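The new discard.go above backs the exported proto.DiscardUnknown helper, which recursively clears the unknown-field buffers retained after unmarshaling. A minimal usage sketch, assuming a hypothetical generated package pb with a message type MyMessage (not part of this change):

package pbutil

import (
    "github.com/golang/protobuf/proto"

    pb "example.com/myapp/pb" // hypothetical generated package
)

// Reserialize drops any fields that were on the wire but are unknown to this
// binary's schema, so they are not carried forward when re-marshaling.
func Reserialize(raw []byte) ([]byte, error) {
    m := new(pb.MyMessage)
    if err := proto.Unmarshal(raw, m); err != nil {
        return nil, err
    }
    proto.DiscardUnknown(m)
    return proto.Marshal(m)
}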
1189    vendor/github.com/golang/protobuf/proto/encode.go (generated, vendored)
        File diff suppressed because it is too large.

30      vendor/github.com/golang/protobuf/proto/equal.go (generated, vendored)
@@ -109,15 +109,6 @@ func equalStruct(v1, v2 reflect.Value) bool {
                // set/unset mismatch
                return false
            }
-           b1, ok := f1.Interface().(raw)
-           if ok {
-               b2 := f2.Interface().(raw)
-               // RawMessage
-               if !bytes.Equal(b1.Bytes(), b2.Bytes()) {
-                   return false
-               }
-               continue
-           }
            f1, f2 = f1.Elem(), f2.Elem()
        }
        if !equalAny(f1, f2, sprop.Prop[i]) {
@@ -146,11 +137,7 @@ func equalStruct(v1, v2 reflect.Value) bool {

    u1 := uf.Bytes()
    u2 := v2.FieldByName("XXX_unrecognized").Bytes()
-   if !bytes.Equal(u1, u2) {
-       return false
-   }
-
-   return true
+   return bytes.Equal(u1, u2)
 }

 // v1 and v2 are known to have the same type.
@@ -261,6 +248,15 @@ func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool {

        m1, m2 := e1.value, e2.value
+
+       if m1 == nil && m2 == nil {
+           // Both have only encoded form.
+           if bytes.Equal(e1.enc, e2.enc) {
+               continue
+           }
+           // The bytes are different, but the extensions might still be
+           // equal. We need to decode them to compare.
+       }
+
        if m1 != nil && m2 != nil {
            // Both are unencoded.
            if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
@@ -276,8 +272,12 @@ func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool {
            desc = m[extNum]
        }
        if desc == nil {
+           // If both have only encoded form and the bytes are the same,
+           // it is handled above. We get here when the bytes are different.
+           // We don't know how to decode it, so just compare them as byte
+           // slices.
            log.Printf("proto: don't know how to compare extension %d of %v", extNum, base)
-           continue
+           return false
        }
        var err error
        if m1 == nil {
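The equalExtMap change above tightens proto.Equal for extensions that are only present in encoded form: identical bytes still compare equal, while undecodable extensions whose bytes differ now make the comparison fail instead of being skipped. A small sketch of the public API involved, with the message type hypothetical:

package pbutil

import (
    "github.com/golang/protobuf/proto"

    pb "example.com/myapp/pb" // hypothetical generated package
)

// CloneIsEqual round-trips a message through Clone and compares the result.
// Equal considers known fields, extensions, and unknown fields.
func CloneIsEqual(m *pb.Config) bool {
    c := proto.Clone(m).(*pb.Config)
    return proto.Equal(m, c)
}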
208     vendor/github.com/golang/protobuf/proto/extensions.go (generated, vendored)
@@ -38,6 +38,7 @@ package proto
 import (
    "errors"
    "fmt"
+   "io"
    "reflect"
    "strconv"
    "sync"
@@ -91,14 +92,29 @@ func (n notLocker) Unlock() {}
 // extendable returns the extendableProto interface for the given generated proto message.
 // If the proto message has the old extension format, it returns a wrapper that implements
 // the extendableProto interface.
-func extendable(p interface{}) (extendableProto, bool) {
-   if ep, ok := p.(extendableProto); ok {
-       return ep, ok
-   }
-   if ep, ok := p.(extendableProtoV1); ok {
-       return extensionAdapter{ep}, ok
-   }
-   return nil, false
+func extendable(p interface{}) (extendableProto, error) {
+   switch p := p.(type) {
+   case extendableProto:
+       if isNilPtr(p) {
+           return nil, fmt.Errorf("proto: nil %T is not extendable", p)
+       }
+       return p, nil
+   case extendableProtoV1:
+       if isNilPtr(p) {
+           return nil, fmt.Errorf("proto: nil %T is not extendable", p)
+       }
+       return extensionAdapter{p}, nil
+   }
+   // Don't allocate a specific error containing %T:
+   // this is the hot path for Clone and MarshalText.
+   return nil, errNotExtendable
+}
+
+var errNotExtendable = errors.New("proto: not an extendable proto.Message")
+
+func isNilPtr(x interface{}) bool {
+   v := reflect.ValueOf(x)
+   return v.Kind() == reflect.Ptr && v.IsNil()
 }

 // XXX_InternalExtensions is an internal representation of proto extensions.
@@ -143,9 +159,6 @@ func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) {
    return e.p.extensionMap, &e.p.mu
 }

-var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem()
-var extendableProtoV1Type = reflect.TypeOf((*extendableProtoV1)(nil)).Elem()
-
 // ExtensionDesc represents an extension specification.
 // Used in generated code from the protocol compiler.
 type ExtensionDesc struct {
@@ -179,8 +192,8 @@ type Extension struct {

 // SetRawExtension is for testing only.
 func SetRawExtension(base Message, id int32, b []byte) {
-   epb, ok := extendable(base)
-   if !ok {
+   epb, err := extendable(base)
+   if err != nil {
        return
    }
    extmap := epb.extensionsWrite()
@@ -205,7 +218,7 @@ func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error {
        pbi = ea.extendableProtoV1
    }
    if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b {
-       return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String())
+       return fmt.Errorf("proto: bad extended type; %v does not extend %v", b, a)
    }
    // Check the range.
    if !isExtensionField(pb, extension.Field) {
@@ -250,85 +263,11 @@ func extensionProperties(ed *ExtensionDesc) *Properties {
    return prop
 }

-// encode encodes any unmarshaled (unencoded) extensions in e.
-func encodeExtensions(e *XXX_InternalExtensions) error {
-   m, mu := e.extensionsRead()
-   if m == nil {
-       return nil // fast path
-   }
-   mu.Lock()
-   defer mu.Unlock()
-   return encodeExtensionsMap(m)
-}
-
-// encode encodes any unmarshaled (unencoded) extensions in e.
-func encodeExtensionsMap(m map[int32]Extension) error {
-   for k, e := range m {
-       if e.value == nil || e.desc == nil {
-           // Extension is only in its encoded form.
-           continue
-       }
-
-       // We don't skip extensions that have an encoded form set,
-       // because the extension value may have been mutated after
-       // the last time this function was called.
-
-       et := reflect.TypeOf(e.desc.ExtensionType)
-       props := extensionProperties(e.desc)
-
-       p := NewBuffer(nil)
-       // If e.value has type T, the encoder expects a *struct{ X T }.
-       // Pass a *T with a zero field and hope it all works out.
-       x := reflect.New(et)
-       x.Elem().Set(reflect.ValueOf(e.value))
-       if err := props.enc(p, props, toStructPointer(x)); err != nil {
-           return err
-       }
-       e.enc = p.buf
-       m[k] = e
-   }
-   return nil
-}
-
-func extensionsSize(e *XXX_InternalExtensions) (n int) {
-   m, mu := e.extensionsRead()
-   if m == nil {
-       return 0
-   }
-   mu.Lock()
-   defer mu.Unlock()
-   return extensionsMapSize(m)
-}
-
-func extensionsMapSize(m map[int32]Extension) (n int) {
-   for _, e := range m {
-       if e.value == nil || e.desc == nil {
-           // Extension is only in its encoded form.
-           n += len(e.enc)
-           continue
-       }
-
-       // We don't skip extensions that have an encoded form set,
-       // because the extension value may have been mutated after
-       // the last time this function was called.
-
-       et := reflect.TypeOf(e.desc.ExtensionType)
-       props := extensionProperties(e.desc)
-
-       // If e.value has type T, the encoder expects a *struct{ X T }.
-       // Pass a *T with a zero field and hope it all works out.
-       x := reflect.New(et)
-       x.Elem().Set(reflect.ValueOf(e.value))
-       n += props.size(props, toStructPointer(x))
-   }
-   return
-}
-
 // HasExtension returns whether the given extension is present in pb.
 func HasExtension(pb Message, extension *ExtensionDesc) bool {
    // TODO: Check types, field numbers, etc.?
-   epb, ok := extendable(pb)
-   if !ok {
+   epb, err := extendable(pb)
+   if err != nil {
        return false
    }
    extmap, mu := epb.extensionsRead()
@@ -336,15 +275,15 @@ func HasExtension(pb Message, extension *ExtensionDesc) bool {
        return false
    }
    mu.Lock()
-   _, ok = extmap[extension.Field]
+   _, ok := extmap[extension.Field]
    mu.Unlock()
    return ok
 }

 // ClearExtension removes the given extension from pb.
 func ClearExtension(pb Message, extension *ExtensionDesc) {
-   epb, ok := extendable(pb)
-   if !ok {
+   epb, err := extendable(pb)
+   if err != nil {
        return
    }
    // TODO: Check types, field numbers, etc.?
@@ -352,16 +291,26 @@ func ClearExtension(pb Message, extension *ExtensionDesc) {
    delete(extmap, extension.Field)
 }

-// GetExtension parses and returns the given extension of pb.
-// If the extension is not present and has no default value it returns ErrMissingExtension.
+// GetExtension retrieves a proto2 extended field from pb.
+//
+// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil),
+// then GetExtension parses the encoded field and returns a Go value of the specified type.
+// If the field is not present, then the default value is returned (if one is specified),
+// otherwise ErrMissingExtension is reported.
+//
+// If the descriptor is not type complete (i.e., ExtensionDesc.ExtensionType is nil),
+// then GetExtension returns the raw encoded bytes of the field extension.
 func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) {
-   epb, ok := extendable(pb)
-   if !ok {
-       return nil, errors.New("proto: not an extendable proto")
+   epb, err := extendable(pb)
+   if err != nil {
+       return nil, err
    }

-   if err := checkExtensionTypes(epb, extension); err != nil {
-       return nil, err
+   if extension.ExtendedType != nil {
+       // can only check type if this is a complete descriptor
+       if err := checkExtensionTypes(epb, extension); err != nil {
+           return nil, err
+       }
    }

    emap, mu := epb.extensionsRead()
@@ -388,6 +337,11 @@ func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) {
        return e.value, nil
    }

+   if extension.ExtensionType == nil {
+       // incomplete descriptor
+       return e.enc, nil
+   }
+
    v, err := decodeExtension(e.enc, extension)
    if err != nil {
        return nil, err
@@ -405,6 +359,11 @@ func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) {
 // defaultExtensionValue returns the default value for extension.
 // If no default for an extension is defined ErrMissingExtension is returned.
 func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) {
+   if extension.ExtensionType == nil {
+       // incomplete descriptor, so no default
+       return nil, ErrMissingExtension
+   }
+
    t := reflect.TypeOf(extension.ExtensionType)
    props := extensionProperties(extension)

@@ -439,31 +398,28 @@ func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) {

 // decodeExtension decodes an extension encoded in b.
 func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
-   o := NewBuffer(b)
-
    t := reflect.TypeOf(extension.ExtensionType)
-   props := extensionProperties(extension)
+   unmarshal := typeUnmarshaler(t, extension.Tag)

    // t is a pointer to a struct, pointer to basic type or a slice.
-   // Allocate a "field" to store the pointer/slice itself; the
-   // pointer/slice will be stored here. We pass
-   // the address of this field to props.dec.
-   // This passes a zero field and a *t and lets props.dec
-   // interpret it as a *struct{ x t }.
+   // Allocate space to store the pointer/slice.
    value := reflect.New(t).Elem()

+   var err error
    for {
-       // Discard wire type and field number varint. It isn't needed.
-       if _, err := o.DecodeVarint(); err != nil {
+       x, n := decodeVarint(b)
+       if n == 0 {
+           return nil, io.ErrUnexpectedEOF
+       }
+       b = b[n:]
+       wire := int(x) & 7
+
+       b, err = unmarshal(b, valToPointer(value.Addr()), wire)
+       if err != nil {
            return nil, err
        }

-       if err := props.dec(o, props, toStructPointer(value.Addr())); err != nil {
-           return nil, err
-       }
-
-       if o.index >= len(o.buf) {
+       if len(b) == 0 {
            break
        }
    }
@@ -473,9 +429,9 @@ func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
 // GetExtensions returns a slice of the extensions present in pb that are also listed in es.
 // The returned slice has the same length as es; missing extensions will appear as nil elements.
 func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) {
-   epb, ok := extendable(pb)
-   if !ok {
-       return nil, errors.New("proto: not an extendable proto")
+   epb, err := extendable(pb)
+   if err != nil {
+       return nil, err
    }
    extensions = make([]interface{}, len(es))
    for i, e := range es {
@@ -494,9 +450,9 @@ func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) {
 // For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing
 // just the Field field, which defines the extension's field number.
 func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) {
-   epb, ok := extendable(pb)
-   if !ok {
-       return nil, fmt.Errorf("proto: %T is not an extendable proto.Message", pb)
+   epb, err := extendable(pb)
+   if err != nil {
+       return nil, err
    }
    registeredExtensions := RegisteredExtensions(pb)

@@ -523,9 +479,9 @@ func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) {

 // SetExtension sets the specified extension of pb to the specified value.
 func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error {
-   epb, ok := extendable(pb)
-   if !ok {
-       return errors.New("proto: not an extendable proto")
+   epb, err := extendable(pb)
+   if err != nil {
+       return err
    }
    if err := checkExtensionTypes(epb, extension); err != nil {
        return err
@@ -550,8 +506,8 @@ func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error {

 // ClearAllExtensions clears all extensions from pb.
 func ClearAllExtensions(pb Message) {
-   epb, ok := extendable(pb)
-   if !ok {
+   epb, err := extendable(pb)
+   if err != nil {
        return
    }
    m := epb.extensionsWrite()
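The exported surface touched here is the proto2 extension accessors. A short sketch of typical use, assuming a hypothetical extendable message pb.Base with a string extension descriptor pb.E_Annotation generated by protoc-gen-go:

package pbutil

import (
    "github.com/golang/protobuf/proto"

    pb "example.com/myapp/pb" // hypothetical generated package
)

// Annotate sets the extension and reads it back. With a type-complete
// descriptor GetExtension returns the decoded Go value (*string here); with an
// incomplete descriptor it now returns the raw encoded bytes instead.
func Annotate(m *pb.Base, note string) (string, error) {
    if err := proto.SetExtension(m, pb.E_Annotation, &note); err != nil {
        return "", err
    }
    if !proto.HasExtension(m, pb.E_Annotation) {
        return "", nil
    }
    v, err := proto.GetExtension(m, pb.E_Annotation)
    if err != nil {
        return "", err
    }
    return *(v.(*string)), nil
}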
70      vendor/github.com/golang/protobuf/proto/lib.go (generated, vendored)
@@ -265,6 +265,7 @@ package proto

 import (
    "encoding/json"
+   "errors"
    "fmt"
    "log"
    "reflect"
@@ -273,6 +274,8 @@ import (
    "sync"
 )

+var errInvalidUTF8 = errors.New("proto: invalid UTF-8 string")
+
 // Message is implemented by generated protocol buffer messages.
 type Message interface {
    Reset()
@@ -309,16 +312,7 @@ type Buffer struct {
    buf   []byte // encode/decode byte stream
    index int    // read point

-   // pools of basic types to amortize allocation.
-   bools   []bool
-   uint32s []uint32
-   uint64s []uint64
-
-   // extra pools, only used with pointer_reflect.go
-   int32s   []int32
-   int64s   []int64
-   float32s []float32
-   float64s []float64
+   deterministic bool
 }

 // NewBuffer allocates a new Buffer and initializes its internal data to
@@ -343,6 +337,30 @@ func (p *Buffer) SetBuf(s []byte) {
 // Bytes returns the contents of the Buffer.
 func (p *Buffer) Bytes() []byte { return p.buf }

+// SetDeterministic sets whether to use deterministic serialization.
+//
+// Deterministic serialization guarantees that for a given binary, equal
+// messages will always be serialized to the same bytes. This implies:
+//
+//   - Repeated serialization of a message will return the same bytes.
+//   - Different processes of the same binary (which may be executing on
+//     different machines) will serialize equal messages to the same bytes.
+//
+// Note that the deterministic serialization is NOT canonical across
+// languages. It is not guaranteed to remain stable over time. It is unstable
+// across different builds with schema changes due to unknown fields.
+// Users who need canonical serialization (e.g., persistent storage in a
+// canonical form, fingerprinting, etc.) should define their own
+// canonicalization specification and implement their own serializer rather
+// than relying on this API.
+//
+// If deterministic serialization is requested, map entries will be sorted
+// by keys in lexographical order. This is an implementation detail and
+// subject to change.
+func (p *Buffer) SetDeterministic(deterministic bool) {
+   p.deterministic = deterministic
+}
+
 /*
  * Helper routines for simplifying the creation of optional fields of basic type.
  */
@@ -831,22 +849,12 @@ func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) {
    return sf, false, nil
 }

+// mapKeys returns a sort.Interface to be used for sorting the map keys.
 // Map fields may have key types of non-float scalars, strings and enums.
-// The easiest way to sort them in some deterministic order is to use fmt.
-// If this turns out to be inefficient we can always consider other options,
-// such as doing a Schwartzian transform.
-
 func mapKeys(vs []reflect.Value) sort.Interface {
-   s := mapKeySorter{
-       vs: vs,
-       // default Less function: textual comparison
-       less: func(a, b reflect.Value) bool {
-           return fmt.Sprint(a.Interface()) < fmt.Sprint(b.Interface())
-       },
-   }
+   s := mapKeySorter{vs: vs}

-   // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps;
-   // numeric keys are sorted numerically.
+   // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps.
    if len(vs) == 0 {
        return s
    }
@@ -855,6 +863,12 @@ func mapKeys(vs []reflect.Value) sort.Interface {
        s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() }
    case reflect.Uint32, reflect.Uint64:
        s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() }
+   case reflect.Bool:
+       s.less = func(a, b reflect.Value) bool { return !a.Bool() && b.Bool() } // false < true
+   case reflect.String:
+       s.less = func(a, b reflect.Value) bool { return a.String() < b.String() }
+   default:
+       panic(fmt.Sprintf("unsupported map key type: %v", vs[0].Kind()))
    }

    return s
@@ -895,3 +909,13 @@ const ProtoPackageIsVersion2 = true
 // ProtoPackageIsVersion1 is referenced from generated protocol buffer files
 // to assert that that code is compatible with this version of the proto package.
 const ProtoPackageIsVersion1 = true
+
+// InternalMessageInfo is a type used internally by generated .pb.go files.
+// This type is not intended to be used by non-generated code.
+// This type is not subject to any compatibility guarantee.
+type InternalMessageInfo struct {
+   marshal   *marshalInfo
+   unmarshal *unmarshalInfo
+   merge     *mergeInfo
+   discard   *discardInfo
+}
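Buffer.SetDeterministic is the user-visible switch for the deterministic-marshal support added throughout this bump. A minimal sketch of marshaling through a proto.Buffer with deterministic map ordering enabled (the message type is hypothetical):

package pbutil

import (
    "github.com/golang/protobuf/proto"

    pb "example.com/myapp/pb" // hypothetical generated package
)

// MarshalDeterministic sorts map entries by key during encoding so that
// repeated marshals of equal messages produce identical bytes. As the doc
// comment above notes, this is not a canonical cross-language form.
func MarshalDeterministic(m *pb.Config) ([]byte, error) {
    buf := proto.NewBuffer(nil)
    buf.SetDeterministic(true)
    if err := buf.Marshal(m); err != nil {
        return nil, err
    }
    return buf.Bytes(), nil
}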
81      vendor/github.com/golang/protobuf/proto/message_set.go (generated, vendored)
@@ -42,6 +42,7 @@ import (
    "fmt"
    "reflect"
    "sort"
+   "sync"
 )

 // errNoMessageTypeID occurs when a protocol buffer does not have a message type ID.
@@ -94,10 +95,7 @@ func (ms *messageSet) find(pb Message) *_MessageSet_Item {
 }

 func (ms *messageSet) Has(pb Message) bool {
-   if ms.find(pb) != nil {
-       return true
-   }
-   return false
+   return ms.find(pb) != nil
 }

 func (ms *messageSet) Unmarshal(pb Message) error {
@@ -150,46 +148,42 @@ func skipVarint(buf []byte) []byte {
 // MarshalMessageSet encodes the extension map represented by m in the message set wire format.
 // It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option.
 func MarshalMessageSet(exts interface{}) ([]byte, error) {
-   var m map[int32]Extension
+   return marshalMessageSet(exts, false)
+}
+
+// marshaMessageSet implements above function, with the opt to turn on / off deterministic during Marshal.
+func marshalMessageSet(exts interface{}, deterministic bool) ([]byte, error) {
    switch exts := exts.(type) {
    case *XXX_InternalExtensions:
-       if err := encodeExtensions(exts); err != nil {
-           return nil, err
-       }
-       m, _ = exts.extensionsRead()
+       var u marshalInfo
+       siz := u.sizeMessageSet(exts)
+       b := make([]byte, 0, siz)
+       return u.appendMessageSet(b, exts, deterministic)
+
    case map[int32]Extension:
-       if err := encodeExtensionsMap(exts); err != nil {
-           return nil, err
-       }
-       m = exts
+       // This is an old-style extension map.
+       // Wrap it in a new-style XXX_InternalExtensions.
+       ie := XXX_InternalExtensions{
+           p: &struct {
+               mu           sync.Mutex
+               extensionMap map[int32]Extension
+           }{
+               extensionMap: exts,
+           },
+       }
+
+       var u marshalInfo
+       siz := u.sizeMessageSet(&ie)
+       b := make([]byte, 0, siz)
+       return u.appendMessageSet(b, &ie, deterministic)
+
    default:
        return nil, errors.New("proto: not an extension map")
    }
-
-   // Sort extension IDs to provide a deterministic encoding.
-   // See also enc_map in encode.go.
-   ids := make([]int, 0, len(m))
-   for id := range m {
-       ids = append(ids, int(id))
-   }
-   sort.Ints(ids)
-
-   ms := &messageSet{Item: make([]*_MessageSet_Item, 0, len(m))}
-   for _, id := range ids {
-       e := m[int32(id)]
-       // Remove the wire type and field number varint, as well as the length varint.
-       msg := skipVarint(skipVarint(e.enc))
-
-       ms.Item = append(ms.Item, &_MessageSet_Item{
-           TypeId:  Int32(int32(id)),
-           Message: msg,
-       })
-   }
-   return Marshal(ms)
 }

 // UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
-// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
+// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
 func UnmarshalMessageSet(buf []byte, exts interface{}) error {
    var m map[int32]Extension
    switch exts := exts.(type) {
@@ -235,7 +229,15 @@ func MarshalMessageSetJSON(exts interface{}) ([]byte, error) {
    var m map[int32]Extension
    switch exts := exts.(type) {
    case *XXX_InternalExtensions:
-       m, _ = exts.extensionsRead()
+       var mu sync.Locker
+       m, mu = exts.extensionsRead()
+       if m != nil {
+           // Keep the extensions map locked until we're done marshaling to prevent
+           // races between marshaling and unmarshaling the lazily-{en,de}coded
+           // values.
+           mu.Lock()
+           defer mu.Unlock()
+       }
    case map[int32]Extension:
        m = exts
    default:
@@ -253,15 +255,16 @@ func MarshalMessageSetJSON(exts interface{}) ([]byte, error) {

    for i, id := range ids {
        ext := m[id]
-       if i > 0 {
-           b.WriteByte(',')
-       }
-
        msd, ok := messageSetMap[id]
        if !ok {
            // Unknown type; we can't render it, so skip it.
            continue
        }

+       if i > 0 && b.Len() > 1 {
+           b.WriteByte(',')
+       }
+
        fmt.Fprintf(&b, `"[%s]":`, msd.name)

        x := ext.value
645     vendor/github.com/golang/protobuf/proto/pointer_reflect.go (generated, vendored)
@@ -29,7 +29,7 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

-// +build appengine js
+// +build purego appengine js

 // This file contains an implementation of proto field accesses using package reflect.
 // It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
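The purego tag added to the build constraint above gives importers a supported way to select this reflect-based implementation off App Engine, typically by building with go build -tags purego. As an illustration only (the package below is hypothetical; only the tag names come from the diff), a consumer can gate its own unsafe-free fallback on the same tags:

// +build purego appengine js

// Package nounsafe compiles only in builds where the proto package also uses
// its reflect-based accessors instead of package unsafe.
package nounsafe

// UsesUnsafe reports whether this build relies on package unsafe.
const UsesUnsafe = false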
@@ -38,32 +38,13 @@
 package proto

 import (
-   "math"
    "reflect"
+   "sync"
 )

-// A structPointer is a pointer to a struct.
-type structPointer struct {
-   v reflect.Value
-}
-
-// toStructPointer returns a structPointer equivalent to the given reflect value.
-// The reflect value must itself be a pointer to a struct.
-func toStructPointer(v reflect.Value) structPointer {
-   return structPointer{v}
-}
-
-// IsNil reports whether p is nil.
-func structPointer_IsNil(p structPointer) bool {
-   return p.v.IsNil()
-}
-
-// Interface returns the struct pointer as an interface value.
-func structPointer_Interface(p structPointer, _ reflect.Type) interface{} {
-   return p.v.Interface()
-}
-
-// A field identifies a field in a struct, accessible from a structPointer.
+const unsafeAllowed = false
+
+// A field identifies a field in a struct, accessible from a pointer.
 // In this implementation, a field is identified by the sequence of field indices
 // passed to reflect's FieldByIndex.
 type field []int
@@ -76,409 +57,301 @@ func toField(f *reflect.StructField) field {
 [Remainder of this hunk: the reflect-based structPointer helpers (structPointer_field, structPointer_Bytes/Bool/String/Extensions/ExtMap, structPointer_Word32*, structPointer_Word64*, and the word32/word32Val/word32Slice/word64/word64Val wrappers) are deleted, and the table-driven pointer type is added in their place: zeroField, toPointer, toAddrPointer, valToPointer, offset, isNil, grow, the typed accessors (toInt64, toInt32, toUint64, toUint32, toBool, toFloat64, toFloat32, toString, toBytes and their Ptr/Slice variants, toExtensions, toOldExtensions), getInt32Ptr/setInt32Ptr, getInt32Slice/setInt32Slice/appendInt32Slice, getPointer/setPointer/appendPointer, getPointerSlice/setPointerSlice, and getInterfacePointer, mirroring the API of pointer_unsafe.go. The rendered diff is cut off partway through this hunk.]
|
||||||
return word64Val{structPointer_field(p, f)}
|
// interface data of the interface pointed by p.
|
||||||
}
|
func (p pointer) getInterfacePointer() pointer {
|
||||||
|
if p.v.Elem().IsNil() {
|
||||||
type word64Slice struct {
|
return pointer{v: p.v.Elem()}
|
||||||
v reflect.Value
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p word64Slice) Append(x uint64) {
|
|
||||||
n, m := p.v.Len(), p.v.Cap()
|
|
||||||
if n < m {
|
|
||||||
p.v.SetLen(n + 1)
|
|
||||||
} else {
|
|
||||||
t := p.v.Type().Elem()
|
|
||||||
p.v.Set(reflect.Append(p.v, reflect.Zero(t)))
|
|
||||||
}
|
|
||||||
elem := p.v.Index(n)
|
|
||||||
switch elem.Kind() {
|
|
||||||
case reflect.Int64:
|
|
||||||
elem.SetInt(int64(int64(x)))
|
|
||||||
case reflect.Uint64:
|
|
||||||
elem.SetUint(uint64(x))
|
|
||||||
case reflect.Float64:
|
|
||||||
elem.SetFloat(float64(math.Float64frombits(x)))
|
|
||||||
}
|
}
|
||||||
|
return pointer{v: p.v.Elem().Elem().Elem().Field(0).Addr()} // *interface -> interface -> *struct -> struct
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p word64Slice) Len() int {
|
func (p pointer) asPointerTo(t reflect.Type) reflect.Value {
|
||||||
return p.v.Len()
|
// TODO: check that p.v.Type().Elem() == t?
|
||||||
|
return p.v
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p word64Slice) Index(i int) uint64 {
|
func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo {
|
||||||
elem := p.v.Index(i)
|
atomicLock.Lock()
|
||||||
switch elem.Kind() {
|
defer atomicLock.Unlock()
|
||||||
case reflect.Int64:
|
return *p
|
||||||
return uint64(elem.Int())
|
}
|
||||||
case reflect.Uint64:
|
func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) {
|
||||||
return uint64(elem.Uint())
|
atomicLock.Lock()
|
||||||
case reflect.Float64:
|
defer atomicLock.Unlock()
|
||||||
return math.Float64bits(float64(elem.Float()))
|
*p = v
|
||||||
}
|
}
|
||||||
panic("unreachable")
|
func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo {
|
||||||
|
atomicLock.Lock()
|
||||||
|
defer atomicLock.Unlock()
|
||||||
|
return *p
|
||||||
|
}
|
||||||
|
func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) {
|
||||||
|
atomicLock.Lock()
|
||||||
|
defer atomicLock.Unlock()
|
||||||
|
*p = v
|
||||||
|
}
|
||||||
|
func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo {
|
||||||
|
atomicLock.Lock()
|
||||||
|
defer atomicLock.Unlock()
|
||||||
|
return *p
|
||||||
|
}
|
||||||
|
func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) {
|
||||||
|
atomicLock.Lock()
|
||||||
|
defer atomicLock.Unlock()
|
||||||
|
*p = v
|
||||||
|
}
|
||||||
|
func atomicLoadDiscardInfo(p **discardInfo) *discardInfo {
|
||||||
|
atomicLock.Lock()
|
||||||
|
defer atomicLock.Unlock()
|
||||||
|
return *p
|
||||||
|
}
|
||||||
|
func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) {
|
||||||
|
atomicLock.Lock()
|
||||||
|
defer atomicLock.Unlock()
|
||||||
|
*p = v
|
||||||
}
|
}
|
||||||
|
|
||||||
func structPointer_Word64Slice(p structPointer, f field) word64Slice {
|
var atomicLock sync.Mutex
|
||||||
return word64Slice{structPointer_field(p, f)}
|
|
||||||
}
|
|
||||||
|
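The replacement pointer_reflect.go above builds its accessors on a reflect.Value that holds the address of a struct field and recovers typed pointers with type assertions (p.v.Interface().(*T)). For illustration, a minimal standalone sketch of that technique; the names fieldPointer, toInt32, and example are invented for this sketch, not the proto package's API:

package main

import (
	"fmt"
	"reflect"
)

// fieldPointer wraps the address of a struct field as a reflect.Value,
// mirroring the pure-reflect pointer type used when unsafe is unavailable.
type fieldPointer struct {
	v reflect.Value // always holds a pointer, e.g. *int32
}

// toInt32 recovers a typed *int32 via a type assertion, the same pattern as
// the toUint64/toBool/toString accessors in the diff above.
func (p fieldPointer) toInt32() *int32 {
	return p.v.Interface().(*int32)
}

type example struct {
	Count int32
}

func main() {
	e := &example{}
	// Take the address of the Count field through reflection.
	p := fieldPointer{v: reflect.ValueOf(e).Elem().FieldByName("Count").Addr()}
	*p.toInt32() = 42
	fmt.Println(e.Count) // 42
}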
vendor/github.com/golang/protobuf/proto/pointer_unsafe.go (402 lines, generated, vendored)
@ -29,7 +29,7 @@
|
|||||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
|
||||||
// +build !appengine,!js
|
// +build !purego,!appengine,!js
|
||||||
|
|
||||||
// This file contains the implementation of the proto field accesses using package unsafe.
|
// This file contains the implementation of the proto field accesses using package unsafe.
|
||||||
|
|
||||||
@ -37,38 +37,13 @@ package proto
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"reflect"
|
"reflect"
|
||||||
|
"sync/atomic"
|
||||||
"unsafe"
|
"unsafe"
|
||||||
)
|
)
|
||||||
|
|
||||||
// NOTE: These type_Foo functions would more idiomatically be methods,
|
const unsafeAllowed = true
|
||||||
// but Go does not allow methods on pointer types, and we must preserve
|
|
||||||
// some pointer type for the garbage collector. We use these
|
|
||||||
// funcs with clunky names as our poor approximation to methods.
|
|
||||||
//
|
|
||||||
// An alternative would be
|
|
||||||
// type structPointer struct { p unsafe.Pointer }
|
|
||||||
// but that does not registerize as well.
|
|
||||||
|
|
||||||
// A structPointer is a pointer to a struct.
|
// A field identifies a field in a struct, accessible from a pointer.
|
||||||
type structPointer unsafe.Pointer
|
|
||||||
|
|
||||||
// toStructPointer returns a structPointer equivalent to the given reflect value.
|
|
||||||
func toStructPointer(v reflect.Value) structPointer {
|
|
||||||
return structPointer(unsafe.Pointer(v.Pointer()))
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsNil reports whether p is nil.
|
|
||||||
func structPointer_IsNil(p structPointer) bool {
|
|
||||||
return p == nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Interface returns the struct pointer, assumed to have element type t,
|
|
||||||
// as an interface value.
|
|
||||||
func structPointer_Interface(p structPointer, t reflect.Type) interface{} {
|
|
||||||
return reflect.NewAt(t, unsafe.Pointer(p)).Interface()
|
|
||||||
}
|
|
||||||
|
|
||||||
// A field identifies a field in a struct, accessible from a structPointer.
|
|
||||||
// In this implementation, a field is identified by its byte offset from the start of the struct.
|
// In this implementation, a field is identified by its byte offset from the start of the struct.
|
||||||
type field uintptr
|
type field uintptr
|
||||||
|
|
||||||
@ -80,191 +55,254 @@ func toField(f *reflect.StructField) field {
|
|||||||
// invalidField is an invalid field identifier.
|
// invalidField is an invalid field identifier.
|
||||||
const invalidField = ^field(0)
|
const invalidField = ^field(0)
|
||||||
|
|
||||||
|
// zeroField is a noop when calling pointer.offset.
|
||||||
|
const zeroField = field(0)
|
||||||
|
|
||||||
// IsValid reports whether the field identifier is valid.
|
// IsValid reports whether the field identifier is valid.
|
||||||
func (f field) IsValid() bool {
|
func (f field) IsValid() bool {
|
||||||
return f != ^field(0)
|
return f != invalidField
|
||||||
}
|
}
|
||||||
|
|
||||||
// Bytes returns the address of a []byte field in the struct.
|
// The pointer type below is for the new table-driven encoder/decoder.
|
||||||
func structPointer_Bytes(p structPointer, f field) *[]byte {
|
// The implementation here uses unsafe.Pointer to create a generic pointer.
|
||||||
return (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))
|
// In pointer_reflect.go we use reflect instead of unsafe to implement
|
||||||
|
// the same (but slower) interface.
|
||||||
|
type pointer struct {
|
||||||
|
p unsafe.Pointer
|
||||||
}
|
}
|
||||||
|
|
||||||
// BytesSlice returns the address of a [][]byte field in the struct.
|
// size of pointer
|
||||||
func structPointer_BytesSlice(p structPointer, f field) *[][]byte {
|
var ptrSize = unsafe.Sizeof(uintptr(0))
|
||||||
return (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))
|
|
||||||
|
// toPointer converts an interface of pointer type to a pointer
|
||||||
|
// that points to the same target.
|
||||||
|
func toPointer(i *Message) pointer {
|
||||||
|
// Super-tricky - read pointer out of data word of interface value.
|
||||||
|
// Saves ~25ns over the equivalent:
|
||||||
|
// return valToPointer(reflect.ValueOf(*i))
|
||||||
|
return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Bool returns the address of a *bool field in the struct.
|
// toAddrPointer converts an interface to a pointer that points to
|
||||||
func structPointer_Bool(p structPointer, f field) **bool {
|
// the interface data.
|
||||||
return (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
|
func toAddrPointer(i *interface{}, isptr bool) pointer {
|
||||||
}
|
// Super-tricky - read or get the address of data word of interface value.
|
||||||
|
if isptr {
|
||||||
// BoolVal returns the address of a bool field in the struct.
|
// The interface is of pointer type, thus it is a direct interface.
|
||||||
func structPointer_BoolVal(p structPointer, f field) *bool {
|
// The data word is the pointer data itself. We take its address.
|
||||||
return (*bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
|
return pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)}
|
||||||
}
|
|
||||||
|
|
||||||
// BoolSlice returns the address of a []bool field in the struct.
|
|
||||||
func structPointer_BoolSlice(p structPointer, f field) *[]bool {
|
|
||||||
return (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
|
|
||||||
}
|
|
||||||
|
|
||||||
// String returns the address of a *string field in the struct.
|
|
||||||
func structPointer_String(p structPointer, f field) **string {
|
|
||||||
return (**string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
|
|
||||||
}
|
|
||||||
|
|
||||||
// StringVal returns the address of a string field in the struct.
|
|
||||||
func structPointer_StringVal(p structPointer, f field) *string {
|
|
||||||
return (*string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
|
|
||||||
}
|
|
||||||
|
|
||||||
// StringSlice returns the address of a []string field in the struct.
|
|
||||||
func structPointer_StringSlice(p structPointer, f field) *[]string {
|
|
||||||
return (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
|
|
||||||
}
|
|
||||||
|
|
||||||
// ExtMap returns the address of an extension map field in the struct.
|
|
||||||
func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions {
|
|
||||||
return (*XXX_InternalExtensions)(unsafe.Pointer(uintptr(p) + uintptr(f)))
|
|
||||||
}
|
|
||||||
|
|
||||||
func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
|
|
||||||
return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f)))
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewAt returns the reflect.Value for a pointer to a field in the struct.
|
|
||||||
func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value {
|
|
||||||
return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f)))
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetStructPointer writes a *struct field in the struct.
|
|
||||||
func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
|
|
||||||
*(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetStructPointer reads a *struct field in the struct.
|
|
||||||
func structPointer_GetStructPointer(p structPointer, f field) structPointer {
|
|
||||||
return *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f)))
|
|
||||||
}
|
|
||||||
|
|
||||||
// StructPointerSlice the address of a []*struct field in the struct.
|
|
||||||
func structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice {
|
|
||||||
return (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
|
|
||||||
}
|
|
||||||
|
|
||||||
// A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups).
|
|
||||||
type structPointerSlice []structPointer
|
|
||||||
|
|
||||||
func (v *structPointerSlice) Len() int { return len(*v) }
|
|
||||||
func (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] }
|
|
||||||
func (v *structPointerSlice) Append(p structPointer) { *v = append(*v, p) }
|
|
||||||
|
|
||||||
// A word32 is the address of a "pointer to 32-bit value" field.
|
|
||||||
type word32 **uint32
|
|
||||||
|
|
||||||
// IsNil reports whether *v is nil.
|
|
||||||
func word32_IsNil(p word32) bool {
|
|
||||||
return *p == nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set sets *v to point at a newly allocated word set to x.
|
|
||||||
func word32_Set(p word32, o *Buffer, x uint32) {
|
|
||||||
if len(o.uint32s) == 0 {
|
|
||||||
o.uint32s = make([]uint32, uint32PoolSize)
|
|
||||||
}
|
}
|
||||||
o.uint32s[0] = x
|
// The interface is not of pointer type. The data word is the pointer
|
||||||
*p = &o.uint32s[0]
|
// to the data.
|
||||||
o.uint32s = o.uint32s[1:]
|
return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Get gets the value pointed at by *v.
|
// valToPointer converts v to a pointer. v must be of pointer type.
|
||||||
func word32_Get(p word32) uint32 {
|
func valToPointer(v reflect.Value) pointer {
|
||||||
return **p
|
return pointer{p: unsafe.Pointer(v.Pointer())}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Word32 returns the address of a *int32, *uint32, *float32, or *enum field in the struct.
|
// offset converts from a pointer to a structure to a pointer to
|
||||||
func structPointer_Word32(p structPointer, f field) word32 {
|
// one of its fields.
|
||||||
return word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f))))
|
func (p pointer) offset(f field) pointer {
|
||||||
|
// For safety, we should panic if !f.IsValid, however calling panic causes
|
||||||
|
// this to no longer be inlineable, which is a serious performance cost.
|
||||||
|
/*
|
||||||
|
if !f.IsValid() {
|
||||||
|
panic("invalid field")
|
||||||
|
}
|
||||||
|
*/
|
||||||
|
return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))}
|
||||||
}
|
}
|
||||||
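pointer.offset above adds a field's byte offset to an unsafe.Pointer and reinterprets the result as the field's address. A self-contained sketch of the same idea, using a hypothetical msg struct (the names here are illustrative only):

package main

import (
	"fmt"
	"reflect"
	"unsafe"
)

type msg struct {
	A int32
	B string
}

func main() {
	m := &msg{A: 1, B: "x"}

	// A field is identified by its byte offset from the start of the struct,
	// like the field type above.
	f, _ := reflect.TypeOf(*m).FieldByName("B")
	off := f.Offset

	// Add the offset to the struct pointer and reinterpret the result as a
	// *string, mirroring pointer.offset followed by toString. (Modern code
	// could use unsafe.Add for the same arithmetic.)
	bp := (*string)(unsafe.Pointer(uintptr(unsafe.Pointer(m)) + off))
	*bp = "set through the field offset"

	fmt.Println(m.B)
}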
|
|
||||||
// A word32Val is the address of a 32-bit value field.
|
func (p pointer) isNil() bool {
|
||||||
type word32Val *uint32
|
return p.p == nil
|
||||||
|
|
||||||
// Set sets *p to x.
|
|
||||||
func word32Val_Set(p word32Val, x uint32) {
|
|
||||||
*p = x
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Get gets the value pointed at by p.
|
func (p pointer) toInt64() *int64 {
|
||||||
func word32Val_Get(p word32Val) uint32 {
|
return (*int64)(p.p)
|
||||||
return *p
|
}
|
||||||
|
func (p pointer) toInt64Ptr() **int64 {
|
||||||
|
return (**int64)(p.p)
|
||||||
|
}
|
||||||
|
func (p pointer) toInt64Slice() *[]int64 {
|
||||||
|
return (*[]int64)(p.p)
|
||||||
|
}
|
||||||
|
func (p pointer) toInt32() *int32 {
|
||||||
|
return (*int32)(p.p)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Word32Val returns the address of a *int32, *uint32, *float32, or *enum field in the struct.
|
// See pointer_reflect.go for why toInt32Ptr/Slice doesn't exist.
|
||||||
func structPointer_Word32Val(p structPointer, f field) word32Val {
|
/*
|
||||||
return word32Val((*uint32)(unsafe.Pointer(uintptr(p) + uintptr(f))))
|
func (p pointer) toInt32Ptr() **int32 {
|
||||||
}
|
return (**int32)(p.p)
|
||||||
|
|
||||||
// A word32Slice is a slice of 32-bit values.
|
|
||||||
type word32Slice []uint32
|
|
||||||
|
|
||||||
func (v *word32Slice) Append(x uint32) { *v = append(*v, x) }
|
|
||||||
func (v *word32Slice) Len() int { return len(*v) }
|
|
||||||
func (v *word32Slice) Index(i int) uint32 { return (*v)[i] }
|
|
||||||
|
|
||||||
// Word32Slice returns the address of a []int32, []uint32, []float32, or []enum field in the struct.
|
|
||||||
func structPointer_Word32Slice(p structPointer, f field) *word32Slice {
|
|
||||||
return (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
|
|
||||||
}
|
|
||||||
|
|
||||||
// word64 is like word32 but for 64-bit values.
|
|
||||||
type word64 **uint64
|
|
||||||
|
|
||||||
func word64_Set(p word64, o *Buffer, x uint64) {
|
|
||||||
if len(o.uint64s) == 0 {
|
|
||||||
o.uint64s = make([]uint64, uint64PoolSize)
|
|
||||||
}
|
}
|
||||||
o.uint64s[0] = x
|
func (p pointer) toInt32Slice() *[]int32 {
|
||||||
*p = &o.uint64s[0]
|
return (*[]int32)(p.p)
|
||||||
o.uint64s = o.uint64s[1:]
|
}
|
||||||
|
*/
|
||||||
|
func (p pointer) getInt32Ptr() *int32 {
|
||||||
|
return *(**int32)(p.p)
|
||||||
|
}
|
||||||
|
func (p pointer) setInt32Ptr(v int32) {
|
||||||
|
*(**int32)(p.p) = &v
|
||||||
}
|
}
|
||||||
|
|
||||||
func word64_IsNil(p word64) bool {
|
// getInt32Slice loads a []int32 from p.
|
||||||
return *p == nil
|
// The value returned is aliased with the original slice.
|
||||||
|
// This behavior differs from the implementation in pointer_reflect.go.
|
||||||
|
func (p pointer) getInt32Slice() []int32 {
|
||||||
|
return *(*[]int32)(p.p)
|
||||||
}
|
}
|
||||||
|
|
||||||
func word64_Get(p word64) uint64 {
|
// setInt32Slice stores a []int32 to p.
|
||||||
return **p
|
// The value set is aliased with the input slice.
|
||||||
|
// This behavior differs from the implementation in pointer_reflect.go.
|
||||||
|
func (p pointer) setInt32Slice(v []int32) {
|
||||||
|
*(*[]int32)(p.p) = v
|
||||||
}
|
}
|
||||||
|
|
||||||
func structPointer_Word64(p structPointer, f field) word64 {
|
// TODO: Can we get rid of appendInt32Slice and use setInt32Slice instead?
|
||||||
return word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f))))
|
func (p pointer) appendInt32Slice(v int32) {
|
||||||
|
s := (*[]int32)(p.p)
|
||||||
|
*s = append(*s, v)
|
||||||
}
|
}
|
||||||
|
|
||||||
// word64Val is like word32Val but for 64-bit values.
|
func (p pointer) toUint64() *uint64 {
|
||||||
type word64Val *uint64
|
return (*uint64)(p.p)
|
||||||
|
}
|
||||||
func word64Val_Set(p word64Val, o *Buffer, x uint64) {
|
func (p pointer) toUint64Ptr() **uint64 {
|
||||||
*p = x
|
return (**uint64)(p.p)
|
||||||
|
}
|
||||||
|
func (p pointer) toUint64Slice() *[]uint64 {
|
||||||
|
return (*[]uint64)(p.p)
|
||||||
|
}
|
||||||
|
func (p pointer) toUint32() *uint32 {
|
||||||
|
return (*uint32)(p.p)
|
||||||
|
}
|
||||||
|
func (p pointer) toUint32Ptr() **uint32 {
|
||||||
|
return (**uint32)(p.p)
|
||||||
|
}
|
||||||
|
func (p pointer) toUint32Slice() *[]uint32 {
|
||||||
|
return (*[]uint32)(p.p)
|
||||||
|
}
|
||||||
|
func (p pointer) toBool() *bool {
|
||||||
|
return (*bool)(p.p)
|
||||||
|
}
|
||||||
|
func (p pointer) toBoolPtr() **bool {
|
||||||
|
return (**bool)(p.p)
|
||||||
|
}
|
||||||
|
func (p pointer) toBoolSlice() *[]bool {
|
||||||
|
return (*[]bool)(p.p)
|
||||||
|
}
|
||||||
|
func (p pointer) toFloat64() *float64 {
|
||||||
|
return (*float64)(p.p)
|
||||||
|
}
|
||||||
|
func (p pointer) toFloat64Ptr() **float64 {
|
||||||
|
return (**float64)(p.p)
|
||||||
|
}
|
||||||
|
func (p pointer) toFloat64Slice() *[]float64 {
|
||||||
|
return (*[]float64)(p.p)
|
||||||
|
}
|
||||||
|
func (p pointer) toFloat32() *float32 {
|
||||||
|
return (*float32)(p.p)
|
||||||
|
}
|
||||||
|
func (p pointer) toFloat32Ptr() **float32 {
|
||||||
|
return (**float32)(p.p)
|
||||||
|
}
|
||||||
|
func (p pointer) toFloat32Slice() *[]float32 {
|
||||||
|
return (*[]float32)(p.p)
|
||||||
|
}
|
||||||
|
func (p pointer) toString() *string {
|
||||||
|
return (*string)(p.p)
|
||||||
|
}
|
||||||
|
func (p pointer) toStringPtr() **string {
|
||||||
|
return (**string)(p.p)
|
||||||
|
}
|
||||||
|
func (p pointer) toStringSlice() *[]string {
|
||||||
|
return (*[]string)(p.p)
|
||||||
|
}
|
||||||
|
func (p pointer) toBytes() *[]byte {
|
||||||
|
return (*[]byte)(p.p)
|
||||||
|
}
|
||||||
|
func (p pointer) toBytesSlice() *[][]byte {
|
||||||
|
return (*[][]byte)(p.p)
|
||||||
|
}
|
||||||
|
func (p pointer) toExtensions() *XXX_InternalExtensions {
|
||||||
|
return (*XXX_InternalExtensions)(p.p)
|
||||||
|
}
|
||||||
|
func (p pointer) toOldExtensions() *map[int32]Extension {
|
||||||
|
return (*map[int32]Extension)(p.p)
|
||||||
}
|
}
|
||||||
|
|
||||||
func word64Val_Get(p word64Val) uint64 {
|
// getPointerSlice loads []*T from p as a []pointer.
|
||||||
return *p
|
// The value returned is aliased with the original slice.
|
||||||
|
// This behavior differs from the implementation in pointer_reflect.go.
|
||||||
|
func (p pointer) getPointerSlice() []pointer {
|
||||||
|
// Super-tricky - p should point to a []*T where T is a
|
||||||
|
// message type. We load it as []pointer.
|
||||||
|
return *(*[]pointer)(p.p)
|
||||||
}
|
}
|
||||||
|
|
||||||
func structPointer_Word64Val(p structPointer, f field) word64Val {
|
// setPointerSlice stores []pointer into p as a []*T.
|
||||||
return word64Val((*uint64)(unsafe.Pointer(uintptr(p) + uintptr(f))))
|
// The value set is aliased with the input slice.
|
||||||
|
// This behavior differs from the implementation in pointer_reflect.go.
|
||||||
|
func (p pointer) setPointerSlice(v []pointer) {
|
||||||
|
// Super-tricky - p should point to a []*T where T is a
|
||||||
|
// message type. We store it as []pointer.
|
||||||
|
*(*[]pointer)(p.p) = v
|
||||||
}
|
}
|
||||||
|
|
||||||
// word64Slice is like word32Slice but for 64-bit values.
|
// getPointer loads the pointer at p and returns it.
|
||||||
type word64Slice []uint64
|
func (p pointer) getPointer() pointer {
|
||||||
|
return pointer{p: *(*unsafe.Pointer)(p.p)}
|
||||||
func (v *word64Slice) Append(x uint64) { *v = append(*v, x) }
|
}
|
||||||
func (v *word64Slice) Len() int { return len(*v) }
|
|
||||||
func (v *word64Slice) Index(i int) uint64 { return (*v)[i] }
|
// setPointer stores the pointer q at p.
|
||||||
|
func (p pointer) setPointer(q pointer) {
|
||||||
func structPointer_Word64Slice(p structPointer, f field) *word64Slice {
|
*(*unsafe.Pointer)(p.p) = q.p
|
||||||
return (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
|
}
|
||||||
|
|
||||||
|
// append q to the slice pointed to by p.
|
||||||
|
func (p pointer) appendPointer(q pointer) {
|
||||||
|
s := (*[]unsafe.Pointer)(p.p)
|
||||||
|
*s = append(*s, q.p)
|
||||||
|
}
|
||||||
|
|
||||||
|
// getInterfacePointer returns a pointer that points to the
|
||||||
|
// interface data of the interface pointed by p.
|
||||||
|
func (p pointer) getInterfacePointer() pointer {
|
||||||
|
// Super-tricky - read pointer out of data word of interface value.
|
||||||
|
return pointer{p: (*(*[2]unsafe.Pointer)(p.p))[1]}
|
||||||
|
}
|
||||||
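The "super-tricky" reads above lean on the gc runtime's two-word interface representation (type word, data word). A hedged sketch of that layout trick; it is implementation-dependent, and the thing type here is invented:

package main

import (
	"fmt"
	"unsafe"
)

type thing struct{ n int }

func main() {
	t := &thing{n: 7}
	// An interface holding a pointer stores the pointer itself in its data word.
	var i interface{} = t

	// Reinterpret the interface header as two machine words and read the data
	// word, as toPointer and getInterfacePointer do. This depends on the gc
	// runtime's interface layout and is not portable Go.
	data := (*[2]unsafe.Pointer)(unsafe.Pointer(&i))[1]

	fmt.Println((*thing)(data).n) // 7
}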
|
|
||||||
|
// asPointerTo returns a reflect.Value that is a pointer to an
|
||||||
|
// object of type t stored at p.
|
||||||
|
func (p pointer) asPointerTo(t reflect.Type) reflect.Value {
|
||||||
|
return reflect.NewAt(t, p.p)
|
||||||
|
}
|
||||||
|
|
||||||
|
func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo {
|
||||||
|
return (*unmarshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
|
||||||
|
}
|
||||||
|
func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) {
|
||||||
|
atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
|
||||||
|
}
|
||||||
|
func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo {
|
||||||
|
return (*marshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
|
||||||
|
}
|
||||||
|
func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) {
|
||||||
|
atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
|
||||||
|
}
|
||||||
|
func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo {
|
||||||
|
return (*mergeInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
|
||||||
|
}
|
||||||
|
func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) {
|
||||||
|
atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
|
||||||
|
}
|
||||||
|
func atomicLoadDiscardInfo(p **discardInfo) *discardInfo {
|
||||||
|
return (*discardInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
|
||||||
|
}
|
||||||
|
func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) {
|
||||||
|
atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
|
||||||
}
|
}
|
||||||
|
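The atomicLoad*/atomicStore* helpers above publish lazily-built per-type tables through a plain pointer field without taking a lock (the reflect variant earlier falls back to a mutex). A minimal sketch of that pattern with an invented tableInfo type standing in for unmarshalInfo/marshalInfo/mergeInfo/discardInfo:

package main

import (
	"fmt"
	"sync/atomic"
	"unsafe"
)

type tableInfo struct{ name string }

var cached *tableInfo

func loadInfo(p **tableInfo) *tableInfo {
	return (*tableInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
}

func storeInfo(p **tableInfo, v *tableInfo) {
	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
}

func getInfo() *tableInfo {
	if ti := loadInfo(&cached); ti != nil {
		return ti // fast path: table already built and published
	}
	// Slow path: build the table and publish it. Concurrent callers may build
	// it twice; the last store wins, which is fine for an idempotent table.
	ti := &tableInfo{name: "built once"}
	storeInfo(&cached, ti)
	return ti
}

func main() {
	fmt.Println(getInfo().name)
}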
vendor/github.com/golang/protobuf/proto/properties.go (422 lines, generated, vendored)
@ -58,42 +58,6 @@ const (
|
|||||||
WireFixed32 = 5
|
WireFixed32 = 5
|
||||||
)
|
)
|
||||||
|
|
||||||
const startSize = 10 // initial slice/string sizes
|
|
||||||
|
|
||||||
// Encoders are defined in encode.go
|
|
||||||
// An encoder outputs the full representation of a field, including its
|
|
||||||
// tag and encoder type.
|
|
||||||
type encoder func(p *Buffer, prop *Properties, base structPointer) error
|
|
||||||
|
|
||||||
// A valueEncoder encodes a single integer in a particular encoding.
|
|
||||||
type valueEncoder func(o *Buffer, x uint64) error
|
|
||||||
|
|
||||||
// Sizers are defined in encode.go
|
|
||||||
// A sizer returns the encoded size of a field, including its tag and encoder
|
|
||||||
// type.
|
|
||||||
type sizer func(prop *Properties, base structPointer) int
|
|
||||||
|
|
||||||
// A valueSizer returns the encoded size of a single integer in a particular
|
|
||||||
// encoding.
|
|
||||||
type valueSizer func(x uint64) int
|
|
||||||
|
|
||||||
// Decoders are defined in decode.go
|
|
||||||
// A decoder creates a value from its wire representation.
|
|
||||||
// Unrecognized subelements are saved in unrec.
|
|
||||||
type decoder func(p *Buffer, prop *Properties, base structPointer) error
|
|
||||||
|
|
||||||
// A valueDecoder decodes a single integer in a particular encoding.
|
|
||||||
type valueDecoder func(o *Buffer) (x uint64, err error)
|
|
||||||
|
|
||||||
// A oneofMarshaler does the marshaling for all oneof fields in a message.
|
|
||||||
type oneofMarshaler func(Message, *Buffer) error
|
|
||||||
|
|
||||||
// A oneofUnmarshaler does the unmarshaling for a oneof field in a message.
|
|
||||||
type oneofUnmarshaler func(Message, int, int, *Buffer) (bool, error)
|
|
||||||
|
|
||||||
// A oneofSizer does the sizing for all oneof fields in a message.
|
|
||||||
type oneofSizer func(Message) int
|
|
||||||
|
|
||||||
// tagMap is an optimization over map[int]int for typical protocol buffer
|
// tagMap is an optimization over map[int]int for typical protocol buffer
|
||||||
// use-cases. Encoded protocol buffers are often in tag order with small tag
|
// use-cases. Encoded protocol buffers are often in tag order with small tag
|
||||||
// numbers.
|
// numbers.
|
||||||
@ -140,13 +104,6 @@ type StructProperties struct {
|
|||||||
decoderTags tagMap // map from proto tag to struct field number
|
decoderTags tagMap // map from proto tag to struct field number
|
||||||
decoderOrigNames map[string]int // map from original name to struct field number
|
decoderOrigNames map[string]int // map from original name to struct field number
|
||||||
order []int // list of struct field numbers in tag order
|
order []int // list of struct field numbers in tag order
|
||||||
unrecField field // field id of the XXX_unrecognized []byte field
|
|
||||||
extendable bool // is this an extendable proto
|
|
||||||
|
|
||||||
oneofMarshaler oneofMarshaler
|
|
||||||
oneofUnmarshaler oneofUnmarshaler
|
|
||||||
oneofSizer oneofSizer
|
|
||||||
stype reflect.Type
|
|
||||||
|
|
||||||
// OneofTypes contains information about the oneof fields in this message.
|
// OneofTypes contains information about the oneof fields in this message.
|
||||||
// It is keyed by the original name of a field.
|
// It is keyed by the original name of a field.
|
||||||
@ -187,36 +144,19 @@ type Properties struct {
|
|||||||
|
|
||||||
Default string // default value
|
Default string // default value
|
||||||
HasDefault bool // whether an explicit default was provided
|
HasDefault bool // whether an explicit default was provided
|
||||||
def_uint64 uint64
|
|
||||||
|
|
||||||
enc encoder
|
stype reflect.Type // set for struct types only
|
||||||
valEnc valueEncoder // set for bool and numeric types only
|
sprop *StructProperties // set for struct types only
|
||||||
field field
|
|
||||||
tagcode []byte // encoding of EncodeVarint((Tag<<3)|WireType)
|
|
||||||
tagbuf [8]byte
|
|
||||||
stype reflect.Type // set for struct types only
|
|
||||||
sprop *StructProperties // set for struct types only
|
|
||||||
isMarshaler bool
|
|
||||||
isUnmarshaler bool
|
|
||||||
|
|
||||||
mtype reflect.Type // set for map types only
|
mtype reflect.Type // set for map types only
|
||||||
mkeyprop *Properties // set for map types only
|
mkeyprop *Properties // set for map types only
|
||||||
mvalprop *Properties // set for map types only
|
mvalprop *Properties // set for map types only
|
||||||
|
|
||||||
size sizer
|
|
||||||
valSize valueSizer // set for bool and numeric types only
|
|
||||||
|
|
||||||
dec decoder
|
|
||||||
valDec valueDecoder // set for bool and numeric types only
|
|
||||||
|
|
||||||
// If this is a packable field, this will be the decoder for the packed version of the field.
|
|
||||||
packedDec decoder
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// String formats the properties in the protobuf struct field tag style.
|
// String formats the properties in the protobuf struct field tag style.
|
||||||
func (p *Properties) String() string {
|
func (p *Properties) String() string {
|
||||||
s := p.Wire
|
s := p.Wire
|
||||||
s = ","
|
s += ","
|
||||||
s += strconv.Itoa(p.Tag)
|
s += strconv.Itoa(p.Tag)
|
||||||
if p.Required {
|
if p.Required {
|
||||||
s += ",req"
|
s += ",req"
|
||||||
@ -262,29 +202,14 @@ func (p *Properties) Parse(s string) {
|
|||||||
switch p.Wire {
|
switch p.Wire {
|
||||||
case "varint":
|
case "varint":
|
||||||
p.WireType = WireVarint
|
p.WireType = WireVarint
|
||||||
p.valEnc = (*Buffer).EncodeVarint
|
|
||||||
p.valDec = (*Buffer).DecodeVarint
|
|
||||||
p.valSize = sizeVarint
|
|
||||||
case "fixed32":
|
case "fixed32":
|
||||||
p.WireType = WireFixed32
|
p.WireType = WireFixed32
|
||||||
p.valEnc = (*Buffer).EncodeFixed32
|
|
||||||
p.valDec = (*Buffer).DecodeFixed32
|
|
||||||
p.valSize = sizeFixed32
|
|
||||||
case "fixed64":
|
case "fixed64":
|
||||||
p.WireType = WireFixed64
|
p.WireType = WireFixed64
|
||||||
p.valEnc = (*Buffer).EncodeFixed64
|
|
||||||
p.valDec = (*Buffer).DecodeFixed64
|
|
||||||
p.valSize = sizeFixed64
|
|
||||||
case "zigzag32":
|
case "zigzag32":
|
||||||
p.WireType = WireVarint
|
p.WireType = WireVarint
|
||||||
p.valEnc = (*Buffer).EncodeZigzag32
|
|
||||||
p.valDec = (*Buffer).DecodeZigzag32
|
|
||||||
p.valSize = sizeZigzag32
|
|
||||||
case "zigzag64":
|
case "zigzag64":
|
||||||
p.WireType = WireVarint
|
p.WireType = WireVarint
|
||||||
p.valEnc = (*Buffer).EncodeZigzag64
|
|
||||||
p.valDec = (*Buffer).DecodeZigzag64
|
|
||||||
p.valSize = sizeZigzag64
|
|
||||||
case "bytes", "group":
|
case "bytes", "group":
|
||||||
p.WireType = WireBytes
|
p.WireType = WireBytes
|
||||||
// no numeric converter for non-numeric types
|
// no numeric converter for non-numeric types
|
||||||
@ -299,6 +224,7 @@ func (p *Properties) Parse(s string) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
outer:
|
||||||
for i := 2; i < len(fields); i++ {
|
for i := 2; i < len(fields); i++ {
|
||||||
f := fields[i]
|
f := fields[i]
|
||||||
switch {
|
switch {
|
||||||
@ -326,229 +252,28 @@ func (p *Properties) Parse(s string) {
|
|||||||
if i+1 < len(fields) {
|
if i+1 < len(fields) {
|
||||||
// Commas aren't escaped, and def is always last.
|
// Commas aren't escaped, and def is always last.
|
||||||
p.Default += "," + strings.Join(fields[i+1:], ",")
|
p.Default += "," + strings.Join(fields[i+1:], ",")
|
||||||
break
|
break outer
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func logNoSliceEnc(t1, t2 reflect.Type) {
|
|
||||||
fmt.Fprintf(os.Stderr, "proto: no slice oenc for %T = []%T\n", t1, t2)
|
|
||||||
}
|
|
||||||
|
|
||||||
var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem()
|
var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem()
|
||||||
|
|
||||||
// Initialize the fields for encoding and decoding.
|
// setFieldProps initializes the field properties for submessages and maps.
|
||||||
func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lockGetProp bool) {
|
func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, lockGetProp bool) {
|
||||||
p.enc = nil
|
|
||||||
p.dec = nil
|
|
||||||
p.size = nil
|
|
||||||
|
|
||||||
switch t1 := typ; t1.Kind() {
|
switch t1 := typ; t1.Kind() {
|
||||||
default:
|
|
||||||
fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1)
|
|
||||||
|
|
||||||
// proto3 scalar types
|
|
||||||
|
|
||||||
case reflect.Bool:
|
|
||||||
p.enc = (*Buffer).enc_proto3_bool
|
|
||||||
p.dec = (*Buffer).dec_proto3_bool
|
|
||||||
p.size = size_proto3_bool
|
|
||||||
case reflect.Int32:
|
|
||||||
p.enc = (*Buffer).enc_proto3_int32
|
|
||||||
p.dec = (*Buffer).dec_proto3_int32
|
|
||||||
p.size = size_proto3_int32
|
|
||||||
case reflect.Uint32:
|
|
||||||
p.enc = (*Buffer).enc_proto3_uint32
|
|
||||||
p.dec = (*Buffer).dec_proto3_int32 // can reuse
|
|
||||||
p.size = size_proto3_uint32
|
|
||||||
case reflect.Int64, reflect.Uint64:
|
|
||||||
p.enc = (*Buffer).enc_proto3_int64
|
|
||||||
p.dec = (*Buffer).dec_proto3_int64
|
|
||||||
p.size = size_proto3_int64
|
|
||||||
case reflect.Float32:
|
|
||||||
p.enc = (*Buffer).enc_proto3_uint32 // can just treat them as bits
|
|
||||||
p.dec = (*Buffer).dec_proto3_int32
|
|
||||||
p.size = size_proto3_uint32
|
|
||||||
case reflect.Float64:
|
|
||||||
p.enc = (*Buffer).enc_proto3_int64 // can just treat them as bits
|
|
||||||
p.dec = (*Buffer).dec_proto3_int64
|
|
||||||
p.size = size_proto3_int64
|
|
||||||
case reflect.String:
|
|
||||||
p.enc = (*Buffer).enc_proto3_string
|
|
||||||
p.dec = (*Buffer).dec_proto3_string
|
|
||||||
p.size = size_proto3_string
|
|
||||||
|
|
||||||
case reflect.Ptr:
|
case reflect.Ptr:
|
||||||
switch t2 := t1.Elem(); t2.Kind() {
|
if t1.Elem().Kind() == reflect.Struct {
|
||||||
default:
|
|
||||||
fmt.Fprintf(os.Stderr, "proto: no encoder function for %v -> %v\n", t1, t2)
|
|
||||||
break
|
|
||||||
case reflect.Bool:
|
|
||||||
p.enc = (*Buffer).enc_bool
|
|
||||||
p.dec = (*Buffer).dec_bool
|
|
||||||
p.size = size_bool
|
|
||||||
case reflect.Int32:
|
|
||||||
p.enc = (*Buffer).enc_int32
|
|
||||||
p.dec = (*Buffer).dec_int32
|
|
||||||
p.size = size_int32
|
|
||||||
case reflect.Uint32:
|
|
||||||
p.enc = (*Buffer).enc_uint32
|
|
||||||
p.dec = (*Buffer).dec_int32 // can reuse
|
|
||||||
p.size = size_uint32
|
|
||||||
case reflect.Int64, reflect.Uint64:
|
|
||||||
p.enc = (*Buffer).enc_int64
|
|
||||||
p.dec = (*Buffer).dec_int64
|
|
||||||
p.size = size_int64
|
|
||||||
case reflect.Float32:
|
|
||||||
p.enc = (*Buffer).enc_uint32 // can just treat them as bits
|
|
||||||
p.dec = (*Buffer).dec_int32
|
|
||||||
p.size = size_uint32
|
|
||||||
case reflect.Float64:
|
|
||||||
p.enc = (*Buffer).enc_int64 // can just treat them as bits
|
|
||||||
p.dec = (*Buffer).dec_int64
|
|
||||||
p.size = size_int64
|
|
||||||
case reflect.String:
|
|
||||||
p.enc = (*Buffer).enc_string
|
|
||||||
p.dec = (*Buffer).dec_string
|
|
||||||
p.size = size_string
|
|
||||||
case reflect.Struct:
|
|
||||||
p.stype = t1.Elem()
|
p.stype = t1.Elem()
|
||||||
p.isMarshaler = isMarshaler(t1)
|
|
||||||
p.isUnmarshaler = isUnmarshaler(t1)
|
|
||||||
if p.Wire == "bytes" {
|
|
||||||
p.enc = (*Buffer).enc_struct_message
|
|
||||||
p.dec = (*Buffer).dec_struct_message
|
|
||||||
p.size = size_struct_message
|
|
||||||
} else {
|
|
||||||
p.enc = (*Buffer).enc_struct_group
|
|
||||||
p.dec = (*Buffer).dec_struct_group
|
|
||||||
p.size = size_struct_group
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
case reflect.Slice:
|
case reflect.Slice:
|
||||||
switch t2 := t1.Elem(); t2.Kind() {
|
if t2 := t1.Elem(); t2.Kind() == reflect.Ptr && t2.Elem().Kind() == reflect.Struct {
|
||||||
default:
|
p.stype = t2.Elem()
|
||||||
logNoSliceEnc(t1, t2)
|
|
||||||
break
|
|
||||||
case reflect.Bool:
|
|
||||||
if p.Packed {
|
|
||||||
p.enc = (*Buffer).enc_slice_packed_bool
|
|
||||||
p.size = size_slice_packed_bool
|
|
||||||
} else {
|
|
||||||
p.enc = (*Buffer).enc_slice_bool
|
|
||||||
p.size = size_slice_bool
|
|
||||||
}
|
|
||||||
p.dec = (*Buffer).dec_slice_bool
|
|
||||||
p.packedDec = (*Buffer).dec_slice_packed_bool
|
|
||||||
case reflect.Int32:
|
|
||||||
if p.Packed {
|
|
||||||
p.enc = (*Buffer).enc_slice_packed_int32
|
|
||||||
p.size = size_slice_packed_int32
|
|
||||||
} else {
|
|
||||||
p.enc = (*Buffer).enc_slice_int32
|
|
||||||
p.size = size_slice_int32
|
|
||||||
}
|
|
||||||
p.dec = (*Buffer).dec_slice_int32
|
|
||||||
p.packedDec = (*Buffer).dec_slice_packed_int32
|
|
||||||
case reflect.Uint32:
|
|
||||||
if p.Packed {
|
|
||||||
p.enc = (*Buffer).enc_slice_packed_uint32
|
|
||||||
p.size = size_slice_packed_uint32
|
|
||||||
} else {
|
|
||||||
p.enc = (*Buffer).enc_slice_uint32
|
|
||||||
p.size = size_slice_uint32
|
|
||||||
}
|
|
||||||
p.dec = (*Buffer).dec_slice_int32
|
|
||||||
p.packedDec = (*Buffer).dec_slice_packed_int32
|
|
||||||
case reflect.Int64, reflect.Uint64:
|
|
||||||
if p.Packed {
|
|
||||||
p.enc = (*Buffer).enc_slice_packed_int64
|
|
||||||
p.size = size_slice_packed_int64
|
|
||||||
} else {
|
|
||||||
p.enc = (*Buffer).enc_slice_int64
|
|
||||||
p.size = size_slice_int64
|
|
||||||
}
|
|
||||||
p.dec = (*Buffer).dec_slice_int64
|
|
||||||
p.packedDec = (*Buffer).dec_slice_packed_int64
|
|
||||||
case reflect.Uint8:
|
|
||||||
p.dec = (*Buffer).dec_slice_byte
|
|
||||||
if p.proto3 {
|
|
||||||
p.enc = (*Buffer).enc_proto3_slice_byte
|
|
||||||
p.size = size_proto3_slice_byte
|
|
||||||
} else {
|
|
||||||
p.enc = (*Buffer).enc_slice_byte
|
|
||||||
p.size = size_slice_byte
|
|
||||||
}
|
|
||||||
case reflect.Float32, reflect.Float64:
|
|
||||||
switch t2.Bits() {
|
|
||||||
case 32:
|
|
||||||
// can just treat them as bits
|
|
||||||
if p.Packed {
|
|
||||||
p.enc = (*Buffer).enc_slice_packed_uint32
|
|
||||||
p.size = size_slice_packed_uint32
|
|
||||||
} else {
|
|
||||||
p.enc = (*Buffer).enc_slice_uint32
|
|
||||||
p.size = size_slice_uint32
|
|
||||||
}
|
|
||||||
p.dec = (*Buffer).dec_slice_int32
|
|
||||||
p.packedDec = (*Buffer).dec_slice_packed_int32
|
|
||||||
case 64:
|
|
||||||
// can just treat them as bits
|
|
||||||
if p.Packed {
|
|
||||||
p.enc = (*Buffer).enc_slice_packed_int64
|
|
||||||
p.size = size_slice_packed_int64
|
|
||||||
} else {
|
|
||||||
p.enc = (*Buffer).enc_slice_int64
|
|
||||||
p.size = size_slice_int64
|
|
||||||
}
|
|
||||||
p.dec = (*Buffer).dec_slice_int64
|
|
||||||
p.packedDec = (*Buffer).dec_slice_packed_int64
|
|
||||||
default:
|
|
||||||
logNoSliceEnc(t1, t2)
|
|
||||||
break
|
|
||||||
}
|
|
||||||
case reflect.String:
|
|
||||||
p.enc = (*Buffer).enc_slice_string
|
|
||||||
p.dec = (*Buffer).dec_slice_string
|
|
||||||
p.size = size_slice_string
|
|
||||||
case reflect.Ptr:
|
|
||||||
switch t3 := t2.Elem(); t3.Kind() {
|
|
||||||
default:
|
|
||||||
fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T -> %T\n", t1, t2, t3)
|
|
||||||
break
|
|
||||||
case reflect.Struct:
|
|
||||||
p.stype = t2.Elem()
|
|
||||||
p.isMarshaler = isMarshaler(t2)
|
|
||||||
p.isUnmarshaler = isUnmarshaler(t2)
|
|
||||||
if p.Wire == "bytes" {
|
|
||||||
p.enc = (*Buffer).enc_slice_struct_message
|
|
||||||
p.dec = (*Buffer).dec_slice_struct_message
|
|
||||||
p.size = size_slice_struct_message
|
|
||||||
} else {
|
|
||||||
p.enc = (*Buffer).enc_slice_struct_group
|
|
||||||
p.dec = (*Buffer).dec_slice_struct_group
|
|
||||||
p.size = size_slice_struct_group
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case reflect.Slice:
|
|
||||||
switch t2.Elem().Kind() {
|
|
||||||
default:
|
|
||||||
fmt.Fprintf(os.Stderr, "proto: no slice elem oenc for %T -> %T -> %T\n", t1, t2, t2.Elem())
|
|
||||||
break
|
|
||||||
case reflect.Uint8:
|
|
||||||
p.enc = (*Buffer).enc_slice_slice_byte
|
|
||||||
p.dec = (*Buffer).dec_slice_slice_byte
|
|
||||||
p.size = size_slice_slice_byte
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
case reflect.Map:
|
case reflect.Map:
|
||||||
p.enc = (*Buffer).enc_new_map
|
|
||||||
p.dec = (*Buffer).dec_new_map
|
|
||||||
p.size = size_new_map
|
|
||||||
|
|
||||||
p.mtype = t1
|
p.mtype = t1
|
||||||
p.mkeyprop = &Properties{}
|
p.mkeyprop = &Properties{}
|
||||||
p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp)
|
p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp)
|
||||||
@ -562,20 +287,6 @@ func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lock
|
|||||||
p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
|
p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
|
||||||
}
|
}
|
||||||
|
|
||||||
// precalculate tag code
|
|
||||||
wire := p.WireType
|
|
||||||
if p.Packed {
|
|
||||||
wire = WireBytes
|
|
||||||
}
|
|
||||||
x := uint32(p.Tag)<<3 | uint32(wire)
|
|
||||||
i := 0
|
|
||||||
for i = 0; x > 127; i++ {
|
|
||||||
p.tagbuf[i] = 0x80 | uint8(x&0x7F)
|
|
||||||
x >>= 7
|
|
||||||
}
|
|
||||||
p.tagbuf[i] = uint8(x)
|
|
||||||
p.tagcode = p.tagbuf[0 : i+1]
|
|
||||||
|
|
||||||
if p.stype != nil {
|
if p.stype != nil {
|
||||||
if lockGetProp {
|
if lockGetProp {
|
||||||
p.sprop = GetProperties(p.stype)
|
p.sprop = GetProperties(p.stype)
|
||||||
@ -586,32 +297,9 @@ func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lock
|
|||||||
}
|
}
|
||||||
|
|
||||||
var (
|
var (
|
||||||
marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem()
|
marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem()
|
||||||
unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// isMarshaler reports whether type t implements Marshaler.
|
|
||||||
func isMarshaler(t reflect.Type) bool {
|
|
||||||
// We're checking for (likely) pointer-receiver methods
|
|
||||||
// so if t is not a pointer, something is very wrong.
|
|
||||||
// The calls above only invoke isMarshaler on pointer types.
|
|
||||||
if t.Kind() != reflect.Ptr {
|
|
||||||
panic("proto: misuse of isMarshaler")
|
|
||||||
}
|
|
||||||
return t.Implements(marshalerType)
|
|
||||||
}
|
|
||||||
|
|
||||||
// isUnmarshaler reports whether type t implements Unmarshaler.
|
|
||||||
func isUnmarshaler(t reflect.Type) bool {
|
|
||||||
// We're checking for (likely) pointer-receiver methods
|
|
||||||
// so if t is not a pointer, something is very wrong.
|
|
||||||
// The calls above only invoke isUnmarshaler on pointer types.
|
|
||||||
if t.Kind() != reflect.Ptr {
|
|
||||||
panic("proto: misuse of isUnmarshaler")
|
|
||||||
}
|
|
||||||
return t.Implements(unmarshalerType)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Init populates the properties from a protocol buffer struct tag.
|
// Init populates the properties from a protocol buffer struct tag.
|
||||||
func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {
|
func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {
|
||||||
p.init(typ, name, tag, f, true)
|
p.init(typ, name, tag, f, true)
|
||||||
@ -621,14 +309,11 @@ func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructF
|
|||||||
// "bytes,49,opt,def=hello!"
|
// "bytes,49,opt,def=hello!"
|
||||||
p.Name = name
|
p.Name = name
|
||||||
p.OrigName = name
|
p.OrigName = name
|
||||||
if f != nil {
|
|
||||||
p.field = toField(f)
|
|
||||||
}
|
|
||||||
if tag == "" {
|
if tag == "" {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
p.Parse(tag)
|
p.Parse(tag)
|
||||||
p.setEncAndDec(typ, f, lockGetProp)
|
p.setFieldProps(typ, f, lockGetProp)
|
||||||
}
|
}
|
||||||
|
|
||||||
var (
|
var (
|
||||||
@ -678,9 +363,6 @@ func getPropertiesLocked(t reflect.Type) *StructProperties {
|
|||||||
propertiesMap[t] = prop
|
propertiesMap[t] = prop
|
||||||
|
|
||||||
// build properties
|
// build properties
|
||||||
prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType) ||
|
|
||||||
reflect.PtrTo(t).Implements(extendableProtoV1Type)
|
|
||||||
prop.unrecField = invalidField
|
|
||||||
prop.Prop = make([]*Properties, t.NumField())
|
prop.Prop = make([]*Properties, t.NumField())
|
||||||
prop.order = make([]int, t.NumField())
|
prop.order = make([]int, t.NumField())
|
||||||
|
|
||||||
@ -690,17 +372,6 @@ func getPropertiesLocked(t reflect.Type) *StructProperties {
|
|||||||
name := f.Name
|
name := f.Name
|
||||||
p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false)
|
p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false)
|
||||||
|
|
||||||
if f.Name == "XXX_InternalExtensions" { // special case
|
|
||||||
p.enc = (*Buffer).enc_exts
|
|
||||||
p.dec = nil // not needed
|
|
||||||
p.size = size_exts
|
|
||||||
} else if f.Name == "XXX_extensions" { // special case
|
|
||||||
p.enc = (*Buffer).enc_map
|
|
||||||
p.dec = nil // not needed
|
|
||||||
p.size = size_map
|
|
||||||
} else if f.Name == "XXX_unrecognized" { // special case
|
|
||||||
prop.unrecField = toField(&f)
|
|
||||||
}
|
|
||||||
oneof := f.Tag.Get("protobuf_oneof") // special case
|
oneof := f.Tag.Get("protobuf_oneof") // special case
|
||||||
if oneof != "" {
|
if oneof != "" {
|
||||||
// Oneof fields don't use the traditional protobuf tag.
|
// Oneof fields don't use the traditional protobuf tag.
|
||||||
@ -715,9 +386,6 @@ func getPropertiesLocked(t reflect.Type) *StructProperties {
|
|||||||
}
|
}
|
||||||
print("\n")
|
print("\n")
|
||||||
}
|
}
|
||||||
if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && oneof == "" {
|
|
||||||
fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]")
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Re-order prop.order.
|
// Re-order prop.order.
|
||||||
@ -728,8 +396,7 @@ func getPropertiesLocked(t reflect.Type) *StructProperties {
|
|||||||
}
|
}
|
||||||
if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok {
|
if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok {
|
||||||
var oots []interface{}
|
var oots []interface{}
|
||||||
prop.oneofMarshaler, prop.oneofUnmarshaler, prop.oneofSizer, oots = om.XXX_OneofFuncs()
|
_, _, _, oots = om.XXX_OneofFuncs()
|
||||||
prop.stype = t
|
|
||||||
|
|
||||||
// Interpret oneof metadata.
|
// Interpret oneof metadata.
|
||||||
prop.OneofTypes = make(map[string]*OneofProperties)
|
prop.OneofTypes = make(map[string]*OneofProperties)
|
||||||
@ -779,30 +446,6 @@ func getPropertiesLocked(t reflect.Type) *StructProperties {
|
|||||||
return prop
|
return prop
|
||||||
}
|
}
|
||||||
|
|
||||||
// Return the Properties object for the x[0]'th field of the structure.
|
|
||||||
func propByIndex(t reflect.Type, x []int) *Properties {
|
|
||||||
if len(x) != 1 {
|
|
||||||
fmt.Fprintf(os.Stderr, "proto: field index dimension %d (not 1) for type %s\n", len(x), t)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
prop := GetProperties(t)
|
|
||||||
return prop.Prop[x[0]]
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get the address and type of a pointer to a struct from an interface.
|
|
||||||
func getbase(pb Message) (t reflect.Type, b structPointer, err error) {
|
|
||||||
if pb == nil {
|
|
||||||
err = ErrNil
|
|
||||||
return
|
|
||||||
}
|
|
||||||
// get the reflect type of the pointer to the struct.
|
|
||||||
t = reflect.TypeOf(pb)
|
|
||||||
// get the address of the struct.
|
|
||||||
value := reflect.ValueOf(pb)
|
|
||||||
b = toStructPointer(value)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// A global registry of enum types.
|
// A global registry of enum types.
|
||||||
// The generated code will register the generated maps by calling RegisterEnum.
|
// The generated code will register the generated maps by calling RegisterEnum.
|
||||||
|
|
||||||
@ -826,20 +469,42 @@ func EnumValueMap(enumType string) map[string]int32 {
|
|||||||
// A registry of all linked message types.
|
// A registry of all linked message types.
|
||||||
// The string is a fully-qualified proto name ("pkg.Message").
|
// The string is a fully-qualified proto name ("pkg.Message").
|
||||||
var (
|
var (
|
||||||
protoTypes = make(map[string]reflect.Type)
|
protoTypedNils = make(map[string]Message) // a map from proto names to typed nil pointers
|
||||||
revProtoTypes = make(map[reflect.Type]string)
|
protoMapTypes = make(map[string]reflect.Type) // a map from proto names to map types
|
||||||
|
revProtoTypes = make(map[reflect.Type]string)
|
||||||
)
|
)
|
||||||
|
|
||||||
// RegisterType is called from generated code and maps from the fully qualified
|
// RegisterType is called from generated code and maps from the fully qualified
|
||||||
// proto name to the type (pointer to struct) of the protocol buffer.
|
// proto name to the type (pointer to struct) of the protocol buffer.
|
||||||
func RegisterType(x Message, name string) {
|
func RegisterType(x Message, name string) {
|
||||||
if _, ok := protoTypes[name]; ok {
|
if _, ok := protoTypedNils[name]; ok {
|
||||||
// TODO: Some day, make this a panic.
|
// TODO: Some day, make this a panic.
|
||||||
log.Printf("proto: duplicate proto type registered: %s", name)
|
log.Printf("proto: duplicate proto type registered: %s", name)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
t := reflect.TypeOf(x)
|
t := reflect.TypeOf(x)
|
||||||
protoTypes[name] = t
|
if v := reflect.ValueOf(x); v.Kind() == reflect.Ptr && v.Pointer() == 0 {
|
||||||
|
// Generated code always calls RegisterType with nil x.
|
||||||
|
// This check is just for extra safety.
|
||||||
|
protoTypedNils[name] = x
|
||||||
|
} else {
|
||||||
|
protoTypedNils[name] = reflect.Zero(t).Interface().(Message)
|
||||||
|
}
|
||||||
|
revProtoTypes[t] = name
|
||||||
|
}
|
||||||
|
|
||||||
|
// RegisterMapType is called from generated code and maps from the fully qualified
|
||||||
|
// proto name to the native map type of the proto map definition.
|
||||||
|
func RegisterMapType(x interface{}, name string) {
|
||||||
|
if reflect.TypeOf(x).Kind() != reflect.Map {
|
||||||
|
panic(fmt.Sprintf("RegisterMapType(%T, %q); want map", x, name))
|
||||||
|
}
|
||||||
|
if _, ok := protoMapTypes[name]; ok {
|
||||||
|
log.Printf("proto: duplicate proto type registered: %s", name)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
t := reflect.TypeOf(x)
|
||||||
|
protoMapTypes[name] = t
|
||||||
revProtoTypes[t] = name
|
revProtoTypes[t] = name
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -855,7 +520,14 @@ func MessageName(x Message) string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// MessageType returns the message type (pointer to struct) for a named message.
|
// MessageType returns the message type (pointer to struct) for a named message.
|
||||||
func MessageType(name string) reflect.Type { return protoTypes[name] }
|
// The type is not guaranteed to implement proto.Message if the name refers to a
|
||||||
|
// map entry.
|
||||||
|
func MessageType(name string) reflect.Type {
|
||||||
|
if t, ok := protoTypedNils[name]; ok {
|
||||||
|
return reflect.TypeOf(t)
|
||||||
|
}
|
||||||
|
return protoMapTypes[name]
|
||||||
|
}
|
||||||
|
|
||||||
// A registry of all linked proto files.
|
// A registry of all linked proto files.
|
||||||
var (
|
var (
|
||||||
|
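Properties.Parse above splits struct tags of the form "bytes,3,opt,name=foo,def=hello!" into the wire encoding, the tag number, and trailing options. A standalone sketch of reading such a tag; the demo type is invented for this sketch:

package main

import (
	"fmt"
	"reflect"
	"strconv"
	"strings"
)

type demo struct {
	Name string `protobuf:"bytes,1,opt,name=name"`
}

func main() {
	f, _ := reflect.TypeOf(demo{}).FieldByName("Name")
	tag := f.Tag.Get("protobuf")

	// The first two comma-separated fields are the wire encoding and the tag
	// number; the remainder are options such as opt, req, name= and def=.
	parts := strings.Split(tag, ",")
	wire := parts[0]
	num, _ := strconv.Atoi(parts[1])
	fmt.Printf("wire=%s tag=%d options=%v\n", wire, num, parts[2:])
	// wire=bytes tag=1 options=[opt name=name]
}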
vendor/github.com/golang/protobuf/proto/table_marshal.go (2681 lines, generated, vendored, new file; diff not shown because the file is too large)
vendor/github.com/golang/protobuf/proto/table_merge.go (654 lines, generated, vendored, new file)
@ -0,0 +1,654 @@
|
|||||||
|
// Go support for Protocol Buffers - Google's data interchange format
|
||||||
|
//
|
||||||
|
// Copyright 2016 The Go Authors. All rights reserved.
|
||||||
|
// https://github.com/golang/protobuf
|
||||||
|
//
|
||||||
|
// Redistribution and use in source and binary forms, with or without
|
||||||
|
// modification, are permitted provided that the following conditions are
|
||||||
|
// met:
|
||||||
|
//
|
||||||
|
// * Redistributions of source code must retain the above copyright
|
||||||
|
// notice, this list of conditions and the following disclaimer.
|
||||||
|
// * Redistributions in binary form must reproduce the above
|
||||||
|
// copyright notice, this list of conditions and the following disclaimer
|
||||||
|
// in the documentation and/or other materials provided with the
|
||||||
|
// distribution.
|
||||||
|
// * Neither the name of Google Inc. nor the names of its
|
||||||
|
// contributors may be used to endorse or promote products derived from
|
||||||
|
// this software without specific prior written permission.
|
||||||
|
//
|
||||||
|
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
|
||||||
|
package proto
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"reflect"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"sync/atomic"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Merge merges the src message into dst.
|
||||||
|
// This assumes that dst and src of the same type and are non-nil.
|
||||||
|
func (a *InternalMessageInfo) Merge(dst, src Message) {
|
||||||
|
mi := atomicLoadMergeInfo(&a.merge)
|
||||||
|
if mi == nil {
|
||||||
|
mi = getMergeInfo(reflect.TypeOf(dst).Elem())
|
||||||
|
atomicStoreMergeInfo(&a.merge, mi)
|
||||||
|
}
|
||||||
|
mi.merge(toPointer(&dst), toPointer(&src))
|
||||||
|
}
|
||||||
|
|
||||||
|
type mergeInfo struct {
|
||||||
|
typ reflect.Type
|
||||||
|
|
||||||
|
initialized int32 // 0: only typ is valid, 1: everything is valid
|
||||||
|
lock sync.Mutex
|
||||||
|
|
||||||
|
fields []mergeFieldInfo
|
||||||
|
unrecognized field // Offset of XXX_unrecognized
|
||||||
|
}
|
||||||
|
|
||||||
|
type mergeFieldInfo struct {
|
||||||
|
field field // Offset of field, guaranteed to be valid
|
||||||
|
|
||||||
|
// isPointer reports whether the value in the field is a pointer.
|
||||||
|
// This is true for the following situations:
|
||||||
|
// * Pointer to struct
|
||||||
|
// * Pointer to basic type (proto2 only)
|
||||||
|
// * Slice (first value in slice header is a pointer)
|
||||||
|
// * String (first value in string header is a pointer)
|
||||||
|
isPointer bool
|
||||||
|
|
||||||
|
// basicWidth reports the width of the field assuming that it is directly
|
||||||
|
// embedded in the struct (as is the case for basic types in proto3).
|
||||||
|
// The possible values are:
|
||||||
|
// 0: invalid
|
||||||
|
// 1: bool
|
||||||
|
// 4: int32, uint32, float32
|
||||||
|
// 8: int64, uint64, float64
|
||||||
|
basicWidth int
|
||||||
|
|
||||||
|
// Where dst and src are pointers to the types being merged.
|
||||||
|
merge func(dst, src pointer)
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
mergeInfoMap = map[reflect.Type]*mergeInfo{}
|
||||||
|
mergeInfoLock sync.Mutex
|
||||||
|
)
|
||||||
|
|
||||||
|
func getMergeInfo(t reflect.Type) *mergeInfo {
|
||||||
|
mergeInfoLock.Lock()
|
||||||
|
defer mergeInfoLock.Unlock()
|
||||||
|
mi := mergeInfoMap[t]
|
||||||
|
if mi == nil {
|
||||||
|
mi = &mergeInfo{typ: t}
|
||||||
|
mergeInfoMap[t] = mi
|
||||||
|
}
|
||||||
|
return mi
|
||||||
|
}
|
||||||
|
|
||||||
|
// merge merges src into dst assuming they are both of type *mi.typ.
|
||||||
|
func (mi *mergeInfo) merge(dst, src pointer) {
|
||||||
|
if dst.isNil() {
|
||||||
|
panic("proto: nil destination")
|
||||||
|
}
|
||||||
|
if src.isNil() {
|
||||||
|
return // Nothing to do.
|
||||||
|
}
|
||||||
|
|
||||||
|
if atomic.LoadInt32(&mi.initialized) == 0 {
|
||||||
|
mi.computeMergeInfo()
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, fi := range mi.fields {
|
||||||
|
sfp := src.offset(fi.field)
|
||||||
|
|
||||||
|
// As an optimization, we can avoid the merge function call cost
|
||||||
|
// if we know for sure that the source will have no effect
|
||||||
|
// by checking if it is the zero value.
|
||||||
|
if unsafeAllowed {
|
||||||
|
if fi.isPointer && sfp.getPointer().isNil() { // Could be slice or string
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if fi.basicWidth > 0 {
|
||||||
|
switch {
|
||||||
|
case fi.basicWidth == 1 && !*sfp.toBool():
|
||||||
|
continue
|
||||||
|
case fi.basicWidth == 4 && *sfp.toUint32() == 0:
|
||||||
|
continue
|
||||||
|
case fi.basicWidth == 8 && *sfp.toUint64() == 0:
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
dfp := dst.offset(fi.field)
|
||||||
|
fi.merge(dfp, sfp)
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO: Make this faster?
|
||||||
|
out := dst.asPointerTo(mi.typ).Elem()
|
||||||
|
in := src.asPointerTo(mi.typ).Elem()
|
||||||
|
if emIn, err := extendable(in.Addr().Interface()); err == nil {
|
||||||
|
emOut, _ := extendable(out.Addr().Interface())
|
||||||
|
mIn, muIn := emIn.extensionsRead()
|
||||||
|
if mIn != nil {
|
||||||
|
mOut := emOut.extensionsWrite()
|
||||||
|
muIn.Lock()
|
||||||
|
mergeExtension(mOut, mIn)
|
||||||
|
muIn.Unlock()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if mi.unrecognized.IsValid() {
|
||||||
|
if b := *src.offset(mi.unrecognized).toBytes(); len(b) > 0 {
|
||||||
|
*dst.offset(mi.unrecognized).toBytes() = append([]byte(nil), b...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (mi *mergeInfo) computeMergeInfo() {
|
||||||
|
mi.lock.Lock()
|
||||||
|
defer mi.lock.Unlock()
|
||||||
|
if mi.initialized != 0 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
t := mi.typ
|
||||||
|
n := t.NumField()
|
||||||
|
|
||||||
|
props := GetProperties(t)
|
||||||
|
for i := 0; i < n; i++ {
|
||||||
|
f := t.Field(i)
|
||||||
|
if strings.HasPrefix(f.Name, "XXX_") {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
mfi := mergeFieldInfo{field: toField(&f)}
|
||||||
|
tf := f.Type
|
||||||
|
|
||||||
|
// As an optimization, we can avoid the merge function call cost
|
||||||
|
// if we know for sure that the source will have no effect
|
||||||
|
// by checking if it is the zero value.
|
||||||
|
if unsafeAllowed {
|
||||||
|
switch tf.Kind() {
|
||||||
|
case reflect.Ptr, reflect.Slice, reflect.String:
|
||||||
|
// As a special case, we assume slices and strings are pointers
|
||||||
|
// since we know that the first field in the SliceSlice or
|
||||||
|
// StringHeader is a data pointer.
|
||||||
|
mfi.isPointer = true
|
||||||
|
case reflect.Bool:
|
||||||
|
mfi.basicWidth = 1
|
||||||
|
case reflect.Int32, reflect.Uint32, reflect.Float32:
|
||||||
|
mfi.basicWidth = 4
|
||||||
|
case reflect.Int64, reflect.Uint64, reflect.Float64:
|
||||||
|
mfi.basicWidth = 8
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unwrap tf to get at its most basic type.
|
||||||
|
var isPointer, isSlice bool
|
||||||
|
if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
|
||||||
|
isSlice = true
|
||||||
|
tf = tf.Elem()
|
||||||
|
}
|
||||||
|
if tf.Kind() == reflect.Ptr {
|
||||||
|
isPointer = true
|
||||||
|
tf = tf.Elem()
|
||||||
|
}
|
||||||
|
if isPointer && isSlice && tf.Kind() != reflect.Struct {
|
||||||
|
panic("both pointer and slice for basic type in " + tf.Name())
|
||||||
|
}
|
||||||
|
|
||||||
|
switch tf.Kind() {
|
||||||
|
case reflect.Int32:
|
||||||
|
switch {
|
||||||
|
case isSlice: // E.g., []int32
|
||||||
|
mfi.merge = func(dst, src pointer) {
|
||||||
|
// NOTE: toInt32Slice is not defined (see pointer_reflect.go).
|
||||||
|
/*
|
||||||
|
sfsp := src.toInt32Slice()
|
||||||
|
if *sfsp != nil {
|
||||||
|
dfsp := dst.toInt32Slice()
|
||||||
|
*dfsp = append(*dfsp, *sfsp...)
|
||||||
|
if *dfsp == nil {
|
||||||
|
*dfsp = []int64{}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
*/
|
||||||
|
sfs := src.getInt32Slice()
|
||||||
|
if sfs != nil {
|
||||||
|
dfs := dst.getInt32Slice()
|
||||||
|
dfs = append(dfs, sfs...)
|
||||||
|
if dfs == nil {
|
||||||
|
dfs = []int32{}
|
||||||
|
}
|
||||||
|
dst.setInt32Slice(dfs)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case isPointer: // E.g., *int32
|
||||||
|
mfi.merge = func(dst, src pointer) {
|
||||||
|
// NOTE: toInt32Ptr is not defined (see pointer_reflect.go).
|
||||||
|
/*
|
||||||
|
sfpp := src.toInt32Ptr()
|
||||||
|
if *sfpp != nil {
|
||||||
|
dfpp := dst.toInt32Ptr()
|
||||||
|
if *dfpp == nil {
|
||||||
|
*dfpp = Int32(**sfpp)
|
||||||
|
} else {
|
||||||
|
**dfpp = **sfpp
|
||||||
|
}
|
||||||
|
}
|
||||||
|
*/
|
||||||
|
sfp := src.getInt32Ptr()
|
||||||
|
if sfp != nil {
|
||||||
|
dfp := dst.getInt32Ptr()
|
||||||
|
if dfp == nil {
|
||||||
|
dst.setInt32Ptr(*sfp)
|
||||||
|
} else {
|
||||||
|
*dfp = *sfp
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
default: // E.g., int32
|
||||||
|
mfi.merge = func(dst, src pointer) {
|
||||||
|
if v := *src.toInt32(); v != 0 {
|
||||||
|
*dst.toInt32() = v
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case reflect.Int64:
|
||||||
|
switch {
|
||||||
|
case isSlice: // E.g., []int64
|
||||||
|
mfi.merge = func(dst, src pointer) {
|
||||||
|
sfsp := src.toInt64Slice()
|
||||||
|
if *sfsp != nil {
|
||||||
|
dfsp := dst.toInt64Slice()
|
||||||
|
*dfsp = append(*dfsp, *sfsp...)
|
||||||
|
if *dfsp == nil {
|
||||||
|
*dfsp = []int64{}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case isPointer: // E.g., *int64
|
||||||
|
mfi.merge = func(dst, src pointer) {
|
||||||
|
sfpp := src.toInt64Ptr()
|
||||||
|
if *sfpp != nil {
|
||||||
|
dfpp := dst.toInt64Ptr()
|
||||||
|
if *dfpp == nil {
|
||||||
|
*dfpp = Int64(**sfpp)
|
||||||
|
} else {
|
||||||
|
**dfpp = **sfpp
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
default: // E.g., int64
|
||||||
|
mfi.merge = func(dst, src pointer) {
|
||||||
|
if v := *src.toInt64(); v != 0 {
|
||||||
|
*dst.toInt64() = v
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case reflect.Uint32:
|
||||||
|
switch {
|
||||||
|
case isSlice: // E.g., []uint32
|
||||||
|
mfi.merge = func(dst, src pointer) {
|
||||||
|
sfsp := src.toUint32Slice()
|
||||||
|
if *sfsp != nil {
|
||||||
|
dfsp := dst.toUint32Slice()
|
||||||
|
*dfsp = append(*dfsp, *sfsp...)
|
||||||
|
if *dfsp == nil {
|
||||||
|
*dfsp = []uint32{}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case isPointer: // E.g., *uint32
|
||||||
|
mfi.merge = func(dst, src pointer) {
|
||||||
|
sfpp := src.toUint32Ptr()
|
||||||
|
if *sfpp != nil {
|
||||||
|
dfpp := dst.toUint32Ptr()
|
||||||
|
if *dfpp == nil {
|
||||||
|
*dfpp = Uint32(**sfpp)
|
||||||
|
} else {
|
||||||
|
**dfpp = **sfpp
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
default: // E.g., uint32
|
||||||
|
mfi.merge = func(dst, src pointer) {
|
||||||
|
if v := *src.toUint32(); v != 0 {
|
||||||
|
*dst.toUint32() = v
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case reflect.Uint64:
|
||||||
|
switch {
|
||||||
|
case isSlice: // E.g., []uint64
|
||||||
|
mfi.merge = func(dst, src pointer) {
|
||||||
|
sfsp := src.toUint64Slice()
|
||||||
|
if *sfsp != nil {
|
||||||
|
dfsp := dst.toUint64Slice()
|
||||||
|
*dfsp = append(*dfsp, *sfsp...)
|
||||||
|
if *dfsp == nil {
|
||||||
|
*dfsp = []uint64{}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case isPointer: // E.g., *uint64
|
||||||
|
mfi.merge = func(dst, src pointer) {
|
||||||
|
sfpp := src.toUint64Ptr()
|
||||||
|
if *sfpp != nil {
|
||||||
|
dfpp := dst.toUint64Ptr()
|
||||||
|
if *dfpp == nil {
|
||||||
|
*dfpp = Uint64(**sfpp)
|
||||||
|
} else {
|
||||||
|
**dfpp = **sfpp
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
default: // E.g., uint64
|
||||||
|
mfi.merge = func(dst, src pointer) {
|
||||||
|
if v := *src.toUint64(); v != 0 {
|
||||||
|
*dst.toUint64() = v
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case reflect.Float32:
|
||||||
|
switch {
|
||||||
|
case isSlice: // E.g., []float32
|
||||||
|
mfi.merge = func(dst, src pointer) {
|
||||||
|
sfsp := src.toFloat32Slice()
|
||||||
|
if *sfsp != nil {
|
||||||
|
dfsp := dst.toFloat32Slice()
|
||||||
|
*dfsp = append(*dfsp, *sfsp...)
|
||||||
|
if *dfsp == nil {
|
||||||
|
*dfsp = []float32{}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case isPointer: // E.g., *float32
|
||||||
|
mfi.merge = func(dst, src pointer) {
|
||||||
|
sfpp := src.toFloat32Ptr()
|
||||||
|
if *sfpp != nil {
|
||||||
|
dfpp := dst.toFloat32Ptr()
|
||||||
|
if *dfpp == nil {
|
||||||
|
*dfpp = Float32(**sfpp)
|
||||||
|
} else {
|
||||||
|
**dfpp = **sfpp
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
default: // E.g., float32
|
||||||
|
mfi.merge = func(dst, src pointer) {
|
||||||
|
if v := *src.toFloat32(); v != 0 {
|
||||||
|
*dst.toFloat32() = v
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case reflect.Float64:
|
||||||
|
switch {
|
||||||
|
case isSlice: // E.g., []float64
|
||||||
|
mfi.merge = func(dst, src pointer) {
|
||||||
|
sfsp := src.toFloat64Slice()
|
||||||
|
if *sfsp != nil {
|
||||||
|
dfsp := dst.toFloat64Slice()
|
||||||
|
*dfsp = append(*dfsp, *sfsp...)
|
||||||
|
if *dfsp == nil {
|
||||||
|
*dfsp = []float64{}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case isPointer: // E.g., *float64
|
||||||
|
mfi.merge = func(dst, src pointer) {
|
||||||
|
sfpp := src.toFloat64Ptr()
|
||||||
|
if *sfpp != nil {
|
||||||
|
dfpp := dst.toFloat64Ptr()
|
||||||
|
if *dfpp == nil {
|
||||||
|
*dfpp = Float64(**sfpp)
|
||||||
|
} else {
|
||||||
|
**dfpp = **sfpp
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
default: // E.g., float64
|
||||||
|
mfi.merge = func(dst, src pointer) {
|
||||||
|
if v := *src.toFloat64(); v != 0 {
|
||||||
|
*dst.toFloat64() = v
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case reflect.Bool:
|
||||||
|
switch {
|
||||||
|
case isSlice: // E.g., []bool
|
||||||
|
mfi.merge = func(dst, src pointer) {
|
||||||
|
sfsp := src.toBoolSlice()
|
||||||
|
if *sfsp != nil {
|
||||||
|
dfsp := dst.toBoolSlice()
|
||||||
|
*dfsp = append(*dfsp, *sfsp...)
|
||||||
|
if *dfsp == nil {
|
||||||
|
*dfsp = []bool{}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case isPointer: // E.g., *bool
|
||||||
|
mfi.merge = func(dst, src pointer) {
|
||||||
|
sfpp := src.toBoolPtr()
|
||||||
|
if *sfpp != nil {
|
||||||
|
dfpp := dst.toBoolPtr()
|
||||||
|
if *dfpp == nil {
|
||||||
|
*dfpp = Bool(**sfpp)
|
||||||
|
} else {
|
||||||
|
**dfpp = **sfpp
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
default: // E.g., bool
|
||||||
|
mfi.merge = func(dst, src pointer) {
|
||||||
|
if v := *src.toBool(); v {
|
||||||
|
*dst.toBool() = v
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case reflect.String:
|
||||||
|
switch {
|
||||||
|
case isSlice: // E.g., []string
|
||||||
|
mfi.merge = func(dst, src pointer) {
|
||||||
|
sfsp := src.toStringSlice()
|
||||||
|
if *sfsp != nil {
|
||||||
|
dfsp := dst.toStringSlice()
|
||||||
|
*dfsp = append(*dfsp, *sfsp...)
|
||||||
|
if *dfsp == nil {
|
||||||
|
*dfsp = []string{}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case isPointer: // E.g., *string
|
||||||
|
mfi.merge = func(dst, src pointer) {
|
||||||
|
sfpp := src.toStringPtr()
|
||||||
|
if *sfpp != nil {
|
||||||
|
dfpp := dst.toStringPtr()
|
||||||
|
if *dfpp == nil {
|
||||||
|
*dfpp = String(**sfpp)
|
||||||
|
} else {
|
||||||
|
**dfpp = **sfpp
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
default: // E.g., string
|
||||||
|
mfi.merge = func(dst, src pointer) {
|
||||||
|
if v := *src.toString(); v != "" {
|
||||||
|
*dst.toString() = v
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case reflect.Slice:
|
||||||
|
isProto3 := props.Prop[i].proto3
|
||||||
|
switch {
|
||||||
|
case isPointer:
|
||||||
|
panic("bad pointer in byte slice case in " + tf.Name())
|
||||||
|
case tf.Elem().Kind() != reflect.Uint8:
|
||||||
|
panic("bad element kind in byte slice case in " + tf.Name())
|
||||||
|
case isSlice: // E.g., [][]byte
|
||||||
|
mfi.merge = func(dst, src pointer) {
|
||||||
|
sbsp := src.toBytesSlice()
|
||||||
|
if *sbsp != nil {
|
||||||
|
dbsp := dst.toBytesSlice()
|
||||||
|
for _, sb := range *sbsp {
|
||||||
|
if sb == nil {
|
||||||
|
*dbsp = append(*dbsp, nil)
|
||||||
|
} else {
|
||||||
|
*dbsp = append(*dbsp, append([]byte{}, sb...))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if *dbsp == nil {
|
||||||
|
*dbsp = [][]byte{}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
default: // E.g., []byte
|
||||||
|
mfi.merge = func(dst, src pointer) {
|
||||||
|
sbp := src.toBytes()
|
||||||
|
if *sbp != nil {
|
||||||
|
dbp := dst.toBytes()
|
||||||
|
if !isProto3 || len(*sbp) > 0 {
|
||||||
|
*dbp = append([]byte{}, *sbp...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case reflect.Struct:
|
||||||
|
switch {
|
||||||
|
case !isPointer:
|
||||||
|
panic(fmt.Sprintf("message field %s without pointer", tf))
|
||||||
|
case isSlice: // E.g., []*pb.T
|
||||||
|
mi := getMergeInfo(tf)
|
||||||
|
mfi.merge = func(dst, src pointer) {
|
||||||
|
sps := src.getPointerSlice()
|
||||||
|
if sps != nil {
|
||||||
|
dps := dst.getPointerSlice()
|
||||||
|
for _, sp := range sps {
|
||||||
|
var dp pointer
|
||||||
|
if !sp.isNil() {
|
||||||
|
dp = valToPointer(reflect.New(tf))
|
||||||
|
mi.merge(dp, sp)
|
||||||
|
}
|
||||||
|
dps = append(dps, dp)
|
||||||
|
}
|
||||||
|
if dps == nil {
|
||||||
|
dps = []pointer{}
|
||||||
|
}
|
||||||
|
dst.setPointerSlice(dps)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
default: // E.g., *pb.T
|
||||||
|
mi := getMergeInfo(tf)
|
||||||
|
mfi.merge = func(dst, src pointer) {
|
||||||
|
sp := src.getPointer()
|
||||||
|
if !sp.isNil() {
|
||||||
|
dp := dst.getPointer()
|
||||||
|
if dp.isNil() {
|
||||||
|
dp = valToPointer(reflect.New(tf))
|
||||||
|
dst.setPointer(dp)
|
||||||
|
}
|
||||||
|
mi.merge(dp, sp)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case reflect.Map:
|
||||||
|
switch {
|
||||||
|
case isPointer || isSlice:
|
||||||
|
panic("bad pointer or slice in map case in " + tf.Name())
|
||||||
|
default: // E.g., map[K]V
|
||||||
|
mfi.merge = func(dst, src pointer) {
|
||||||
|
sm := src.asPointerTo(tf).Elem()
|
||||||
|
if sm.Len() == 0 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
dm := dst.asPointerTo(tf).Elem()
|
||||||
|
if dm.IsNil() {
|
||||||
|
dm.Set(reflect.MakeMap(tf))
|
||||||
|
}
|
||||||
|
|
||||||
|
switch tf.Elem().Kind() {
|
||||||
|
case reflect.Ptr: // Proto struct (e.g., *T)
|
||||||
|
for _, key := range sm.MapKeys() {
|
||||||
|
val := sm.MapIndex(key)
|
||||||
|
val = reflect.ValueOf(Clone(val.Interface().(Message)))
|
||||||
|
dm.SetMapIndex(key, val)
|
||||||
|
}
|
||||||
|
case reflect.Slice: // E.g. Bytes type (e.g., []byte)
|
||||||
|
for _, key := range sm.MapKeys() {
|
||||||
|
val := sm.MapIndex(key)
|
||||||
|
val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
|
||||||
|
dm.SetMapIndex(key, val)
|
||||||
|
}
|
||||||
|
default: // Basic type (e.g., string)
|
||||||
|
for _, key := range sm.MapKeys() {
|
||||||
|
val := sm.MapIndex(key)
|
||||||
|
dm.SetMapIndex(key, val)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case reflect.Interface:
|
||||||
|
// Must be oneof field.
|
||||||
|
switch {
|
||||||
|
case isPointer || isSlice:
|
||||||
|
panic("bad pointer or slice in interface case in " + tf.Name())
|
||||||
|
default: // E.g., interface{}
|
||||||
|
// TODO: Make this faster?
|
||||||
|
mfi.merge = func(dst, src pointer) {
|
||||||
|
su := src.asPointerTo(tf).Elem()
|
||||||
|
if !su.IsNil() {
|
||||||
|
du := dst.asPointerTo(tf).Elem()
|
||||||
|
typ := su.Elem().Type()
|
||||||
|
if du.IsNil() || du.Elem().Type() != typ {
|
||||||
|
du.Set(reflect.New(typ.Elem())) // Initialize interface if empty
|
||||||
|
}
|
||||||
|
sv := su.Elem().Elem().Field(0)
|
||||||
|
if sv.Kind() == reflect.Ptr && sv.IsNil() {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
dv := du.Elem().Elem().Field(0)
|
||||||
|
if dv.Kind() == reflect.Ptr && dv.IsNil() {
|
||||||
|
dv.Set(reflect.New(sv.Type().Elem())) // Initialize proto message if empty
|
||||||
|
}
|
||||||
|
switch sv.Type().Kind() {
|
||||||
|
case reflect.Ptr: // Proto struct (e.g., *T)
|
||||||
|
Merge(dv.Interface().(Message), sv.Interface().(Message))
|
||||||
|
case reflect.Slice: // E.g. Bytes type (e.g., []byte)
|
||||||
|
dv.Set(reflect.ValueOf(append([]byte{}, sv.Bytes()...)))
|
||||||
|
default: // Basic type (e.g., string)
|
||||||
|
dv.Set(sv)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
panic(fmt.Sprintf("merger not found for type:%s", tf))
|
||||||
|
}
|
||||||
|
mi.fields = append(mi.fields, mfi)
|
||||||
|
}
|
||||||
|
|
||||||
|
mi.unrecognized = invalidField
|
||||||
|
if f, ok := t.FieldByName("XXX_unrecognized"); ok {
|
||||||
|
if f.Type != reflect.TypeOf([]byte{}) {
|
||||||
|
panic("expected XXX_unrecognized to be of type []byte")
|
||||||
|
}
|
||||||
|
mi.unrecognized = toField(&f)
|
||||||
|
}
|
||||||
|
|
||||||
|
atomic.StoreInt32(&mi.initialized, 1)
|
||||||
|
}
|
1967
vendor/github.com/golang/protobuf/proto/table_unmarshal.go
generated
vendored
Normal file
1967
vendor/github.com/golang/protobuf/proto/table_unmarshal.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
61
vendor/github.com/golang/protobuf/proto/text.go
generated
vendored
61
vendor/github.com/golang/protobuf/proto/text.go
generated
vendored
@ -50,7 +50,6 @@ import (
|
|||||||
var (
|
var (
|
||||||
newline = []byte("\n")
|
newline = []byte("\n")
|
||||||
spaces = []byte(" ")
|
spaces = []byte(" ")
|
||||||
gtNewline = []byte(">\n")
|
|
||||||
endBraceNewline = []byte("}\n")
|
endBraceNewline = []byte("}\n")
|
||||||
backslashN = []byte{'\\', 'n'}
|
backslashN = []byte{'\\', 'n'}
|
||||||
backslashR = []byte{'\\', 'r'}
|
backslashR = []byte{'\\', 'r'}
|
||||||
@ -170,11 +169,6 @@ func writeName(w *textWriter, props *Properties) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// raw is the interface satisfied by RawMessage.
|
|
||||||
type raw interface {
|
|
||||||
Bytes() []byte
|
|
||||||
}
|
|
||||||
|
|
||||||
func requiresQuotes(u string) bool {
|
func requiresQuotes(u string) bool {
|
||||||
// When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted.
|
// When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted.
|
||||||
for _, ch := range u {
|
for _, ch := range u {
|
||||||
@ -269,6 +263,10 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
|
|||||||
props := sprops.Prop[i]
|
props := sprops.Prop[i]
|
||||||
name := st.Field(i).Name
|
name := st.Field(i).Name
|
||||||
|
|
||||||
|
if name == "XXX_NoUnkeyedLiteral" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
if strings.HasPrefix(name, "XXX_") {
|
if strings.HasPrefix(name, "XXX_") {
|
||||||
// There are two XXX_ fields:
|
// There are two XXX_ fields:
|
||||||
// XXX_unrecognized []byte
|
// XXX_unrecognized []byte
|
||||||
@ -436,12 +434,6 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if b, ok := fv.Interface().(raw); ok {
|
|
||||||
if err := writeRaw(w, b.Bytes()); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// Enums have a String method, so writeAny will work fine.
|
// Enums have a String method, so writeAny will work fine.
|
||||||
if err := tm.writeAny(w, fv, props); err != nil {
|
if err := tm.writeAny(w, fv, props); err != nil {
|
||||||
@ -455,7 +447,7 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
|
|||||||
|
|
||||||
// Extensions (the XXX_extensions field).
|
// Extensions (the XXX_extensions field).
|
||||||
pv := sv.Addr()
|
pv := sv.Addr()
|
||||||
if _, ok := extendable(pv.Interface()); ok {
|
if _, err := extendable(pv.Interface()); err == nil {
|
||||||
if err := tm.writeExtensions(w, pv); err != nil {
|
if err := tm.writeExtensions(w, pv); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -464,27 +456,6 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// writeRaw writes an uninterpreted raw message.
|
|
||||||
func writeRaw(w *textWriter, b []byte) error {
|
|
||||||
if err := w.WriteByte('<'); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if !w.compact {
|
|
||||||
if err := w.WriteByte('\n'); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
w.indent()
|
|
||||||
if err := writeUnknownStruct(w, b); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
w.unindent()
|
|
||||||
if err := w.WriteByte('>'); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// writeAny writes an arbitrary field.
|
// writeAny writes an arbitrary field.
|
||||||
func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error {
|
func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error {
|
||||||
v = reflect.Indirect(v)
|
v = reflect.Indirect(v)
|
||||||
@ -535,6 +506,19 @@ func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Propert
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
w.indent()
|
w.indent()
|
||||||
|
if v.CanAddr() {
|
||||||
|
// Calling v.Interface on a struct causes the reflect package to
|
||||||
|
// copy the entire struct. This is racy with the new Marshaler
|
||||||
|
// since we atomically update the XXX_sizecache.
|
||||||
|
//
|
||||||
|
// Thus, we retrieve a pointer to the struct if possible to avoid
|
||||||
|
// a race since v.Interface on the pointer doesn't copy the struct.
|
||||||
|
//
|
||||||
|
// If v is not addressable, then we are not worried about a race
|
||||||
|
// since it implies that the binary Marshaler cannot possibly be
|
||||||
|
// mutating this value.
|
||||||
|
v = v.Addr()
|
||||||
|
}
|
||||||
if etm, ok := v.Interface().(encoding.TextMarshaler); ok {
|
if etm, ok := v.Interface().(encoding.TextMarshaler); ok {
|
||||||
text, err := etm.MarshalText()
|
text, err := etm.MarshalText()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -543,8 +527,13 @@ func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Propert
|
|||||||
if _, err = w.Write(text); err != nil {
|
if _, err = w.Write(text); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
} else if err := tm.writeStruct(w, v); err != nil {
|
} else {
|
||||||
return err
|
if v.Kind() == reflect.Ptr {
|
||||||
|
v = v.Elem()
|
||||||
|
}
|
||||||
|
if err := tm.writeStruct(w, v); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
}
|
}
|
||||||
w.unindent()
|
w.unindent()
|
||||||
if err := w.WriteByte(ket); err != nil {
|
if err := w.WriteByte(ket); err != nil {
|
||||||
|
77
vendor/github.com/golang/protobuf/proto/text_parser.go
generated
vendored
77
vendor/github.com/golang/protobuf/proto/text_parser.go
generated
vendored
@ -206,7 +206,6 @@ func (p *textParser) advance() {
|
|||||||
|
|
||||||
var (
|
var (
|
||||||
errBadUTF8 = errors.New("proto: bad UTF-8")
|
errBadUTF8 = errors.New("proto: bad UTF-8")
|
||||||
errBadHex = errors.New("proto: bad hexadecimal")
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func unquoteC(s string, quote rune) (string, error) {
|
func unquoteC(s string, quote rune) (string, error) {
|
||||||
@ -277,60 +276,47 @@ func unescape(s string) (ch string, tail string, err error) {
|
|||||||
return "?", s, nil // trigraph workaround
|
return "?", s, nil // trigraph workaround
|
||||||
case '\'', '"', '\\':
|
case '\'', '"', '\\':
|
||||||
return string(r), s, nil
|
return string(r), s, nil
|
||||||
case '0', '1', '2', '3', '4', '5', '6', '7', 'x', 'X':
|
case '0', '1', '2', '3', '4', '5', '6', '7':
|
||||||
if len(s) < 2 {
|
if len(s) < 2 {
|
||||||
return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
|
return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
|
||||||
}
|
}
|
||||||
base := 8
|
ss := string(r) + s[:2]
|
||||||
ss := s[:2]
|
|
||||||
s = s[2:]
|
s = s[2:]
|
||||||
if r == 'x' || r == 'X' {
|
i, err := strconv.ParseUint(ss, 8, 8)
|
||||||
base = 16
|
|
||||||
} else {
|
|
||||||
ss = string(r) + ss
|
|
||||||
}
|
|
||||||
i, err := strconv.ParseUint(ss, base, 8)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", "", err
|
return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss)
|
||||||
}
|
}
|
||||||
return string([]byte{byte(i)}), s, nil
|
return string([]byte{byte(i)}), s, nil
|
||||||
case 'u', 'U':
|
case 'x', 'X', 'u', 'U':
|
||||||
n := 4
|
var n int
|
||||||
if r == 'U' {
|
switch r {
|
||||||
|
case 'x', 'X':
|
||||||
|
n = 2
|
||||||
|
case 'u':
|
||||||
|
n = 4
|
||||||
|
case 'U':
|
||||||
n = 8
|
n = 8
|
||||||
}
|
}
|
||||||
if len(s) < n {
|
if len(s) < n {
|
||||||
return "", "", fmt.Errorf(`\%c requires %d digits`, r, n)
|
return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n)
|
||||||
}
|
|
||||||
|
|
||||||
bs := make([]byte, n/2)
|
|
||||||
for i := 0; i < n; i += 2 {
|
|
||||||
a, ok1 := unhex(s[i])
|
|
||||||
b, ok2 := unhex(s[i+1])
|
|
||||||
if !ok1 || !ok2 {
|
|
||||||
return "", "", errBadHex
|
|
||||||
}
|
|
||||||
bs[i/2] = a<<4 | b
|
|
||||||
}
|
}
|
||||||
|
ss := s[:n]
|
||||||
s = s[n:]
|
s = s[n:]
|
||||||
return string(bs), s, nil
|
i, err := strconv.ParseUint(ss, 16, 64)
|
||||||
|
if err != nil {
|
||||||
|
return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss)
|
||||||
|
}
|
||||||
|
if r == 'x' || r == 'X' {
|
||||||
|
return string([]byte{byte(i)}), s, nil
|
||||||
|
}
|
||||||
|
if i > utf8.MaxRune {
|
||||||
|
return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss)
|
||||||
|
}
|
||||||
|
return string(i), s, nil
|
||||||
}
|
}
|
||||||
return "", "", fmt.Errorf(`unknown escape \%c`, r)
|
return "", "", fmt.Errorf(`unknown escape \%c`, r)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Adapted from src/pkg/strconv/quote.go.
|
|
||||||
func unhex(b byte) (v byte, ok bool) {
|
|
||||||
switch {
|
|
||||||
case '0' <= b && b <= '9':
|
|
||||||
return b - '0', true
|
|
||||||
case 'a' <= b && b <= 'f':
|
|
||||||
return b - 'a' + 10, true
|
|
||||||
case 'A' <= b && b <= 'F':
|
|
||||||
return b - 'A' + 10, true
|
|
||||||
}
|
|
||||||
return 0, false
|
|
||||||
}
|
|
||||||
|
|
||||||
// Back off the parser by one token. Can only be done between calls to next().
|
// Back off the parser by one token. Can only be done between calls to next().
|
||||||
// It makes the next advance() a no-op.
|
// It makes the next advance() a no-op.
|
||||||
func (p *textParser) back() { p.backed = true }
|
func (p *textParser) back() { p.backed = true }
|
||||||
@ -728,6 +714,9 @@ func (p *textParser) consumeExtName() (string, error) {
|
|||||||
if tok.err != nil {
|
if tok.err != nil {
|
||||||
return "", p.errorf("unrecognized type_url or extension name: %s", tok.err)
|
return "", p.errorf("unrecognized type_url or extension name: %s", tok.err)
|
||||||
}
|
}
|
||||||
|
if p.done && tok.value != "]" {
|
||||||
|
return "", p.errorf("unclosed type_url or extension name")
|
||||||
|
}
|
||||||
}
|
}
|
||||||
return strings.Join(parts, ""), nil
|
return strings.Join(parts, ""), nil
|
||||||
}
|
}
|
||||||
@ -865,7 +854,7 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) error {
|
|||||||
return p.readStruct(fv, terminator)
|
return p.readStruct(fv, terminator)
|
||||||
case reflect.Uint32:
|
case reflect.Uint32:
|
||||||
if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
|
if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
|
||||||
fv.SetUint(x)
|
fv.SetUint(uint64(x))
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
case reflect.Uint64:
|
case reflect.Uint64:
|
||||||
@ -883,13 +872,9 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) error {
|
|||||||
// UnmarshalText returns *RequiredNotSetError.
|
// UnmarshalText returns *RequiredNotSetError.
|
||||||
func UnmarshalText(s string, pb Message) error {
|
func UnmarshalText(s string, pb Message) error {
|
||||||
if um, ok := pb.(encoding.TextUnmarshaler); ok {
|
if um, ok := pb.(encoding.TextUnmarshaler); ok {
|
||||||
err := um.UnmarshalText([]byte(s))
|
return um.UnmarshalText([]byte(s))
|
||||||
return err
|
|
||||||
}
|
}
|
||||||
pb.Reset()
|
pb.Reset()
|
||||||
v := reflect.ValueOf(pb)
|
v := reflect.ValueOf(pb)
|
||||||
if pe := newTextParser(s).readStruct(v.Elem(), ""); pe != nil {
|
return newTextParser(s).readStruct(v.Elem(), "")
|
||||||
return pe
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
|
49
vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
generated
vendored
49
vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
generated
vendored
@ -1,16 +1,7 @@
|
|||||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||||
// source: google/protobuf/any.proto
|
// source: google/protobuf/any.proto
|
||||||
|
|
||||||
/*
|
package any // import "github.com/golang/protobuf/ptypes/any"
|
||||||
Package any is a generated protocol buffer package.
|
|
||||||
|
|
||||||
It is generated from these files:
|
|
||||||
google/protobuf/any.proto
|
|
||||||
|
|
||||||
It has these top-level messages:
|
|
||||||
Any
|
|
||||||
*/
|
|
||||||
package any
|
|
||||||
|
|
||||||
import proto "github.com/golang/protobuf/proto"
|
import proto "github.com/golang/protobuf/proto"
|
||||||
import fmt "fmt"
|
import fmt "fmt"
|
||||||
@ -132,14 +123,36 @@ type Any struct {
|
|||||||
//
|
//
|
||||||
TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl" json:"type_url,omitempty"`
|
TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl" json:"type_url,omitempty"`
|
||||||
// Must be a valid serialized protocol buffer of the above specified type.
|
// Must be a valid serialized protocol buffer of the above specified type.
|
||||||
Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
|
Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
|
||||||
|
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||||
|
XXX_unrecognized []byte `json:"-"`
|
||||||
|
XXX_sizecache int32 `json:"-"`
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *Any) Reset() { *m = Any{} }
|
func (m *Any) Reset() { *m = Any{} }
|
||||||
func (m *Any) String() string { return proto.CompactTextString(m) }
|
func (m *Any) String() string { return proto.CompactTextString(m) }
|
||||||
func (*Any) ProtoMessage() {}
|
func (*Any) ProtoMessage() {}
|
||||||
func (*Any) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
|
func (*Any) Descriptor() ([]byte, []int) {
|
||||||
func (*Any) XXX_WellKnownType() string { return "Any" }
|
return fileDescriptor_any_744b9ca530f228db, []int{0}
|
||||||
|
}
|
||||||
|
func (*Any) XXX_WellKnownType() string { return "Any" }
|
||||||
|
func (m *Any) XXX_Unmarshal(b []byte) error {
|
||||||
|
return xxx_messageInfo_Any.Unmarshal(m, b)
|
||||||
|
}
|
||||||
|
func (m *Any) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||||
|
return xxx_messageInfo_Any.Marshal(b, m, deterministic)
|
||||||
|
}
|
||||||
|
func (dst *Any) XXX_Merge(src proto.Message) {
|
||||||
|
xxx_messageInfo_Any.Merge(dst, src)
|
||||||
|
}
|
||||||
|
func (m *Any) XXX_Size() int {
|
||||||
|
return xxx_messageInfo_Any.Size(m)
|
||||||
|
}
|
||||||
|
func (m *Any) XXX_DiscardUnknown() {
|
||||||
|
xxx_messageInfo_Any.DiscardUnknown(m)
|
||||||
|
}
|
||||||
|
|
||||||
|
var xxx_messageInfo_Any proto.InternalMessageInfo
|
||||||
|
|
||||||
func (m *Any) GetTypeUrl() string {
|
func (m *Any) GetTypeUrl() string {
|
||||||
if m != nil {
|
if m != nil {
|
||||||
@ -159,9 +172,9 @@ func init() {
|
|||||||
proto.RegisterType((*Any)(nil), "google.protobuf.Any")
|
proto.RegisterType((*Any)(nil), "google.protobuf.Any")
|
||||||
}
|
}
|
||||||
|
|
||||||
func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor0) }
|
func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor_any_744b9ca530f228db) }
|
||||||
|
|
||||||
var fileDescriptor0 = []byte{
|
var fileDescriptor_any_744b9ca530f228db = []byte{
|
||||||
// 185 bytes of a gzipped FileDescriptorProto
|
// 185 bytes of a gzipped FileDescriptorProto
|
||||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4c, 0xcf, 0xcf, 0x4f,
|
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4c, 0xcf, 0xcf, 0x4f,
|
||||||
0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcc, 0xab, 0xd4,
|
0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcc, 0xab, 0xd4,
|
||||||
|
51
vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go
generated
vendored
51
vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go
generated
vendored
@ -1,16 +1,7 @@
|
|||||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||||
// source: google/protobuf/duration.proto
|
// source: google/protobuf/duration.proto
|
||||||
|
|
||||||
/*
|
package duration // import "github.com/golang/protobuf/ptypes/duration"
|
||||||
Package duration is a generated protocol buffer package.
|
|
||||||
|
|
||||||
It is generated from these files:
|
|
||||||
google/protobuf/duration.proto
|
|
||||||
|
|
||||||
It has these top-level messages:
|
|
||||||
Duration
|
|
||||||
*/
|
|
||||||
package duration
|
|
||||||
|
|
||||||
import proto "github.com/golang/protobuf/proto"
|
import proto "github.com/golang/protobuf/proto"
|
||||||
import fmt "fmt"
|
import fmt "fmt"
|
||||||
@ -98,14 +89,36 @@ type Duration struct {
|
|||||||
// of one second or more, a non-zero value for the `nanos` field must be
|
// of one second or more, a non-zero value for the `nanos` field must be
|
||||||
// of the same sign as the `seconds` field. Must be from -999,999,999
|
// of the same sign as the `seconds` field. Must be from -999,999,999
|
||||||
// to +999,999,999 inclusive.
|
// to +999,999,999 inclusive.
|
||||||
Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"`
|
Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"`
|
||||||
|
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||||
|
XXX_unrecognized []byte `json:"-"`
|
||||||
|
XXX_sizecache int32 `json:"-"`
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *Duration) Reset() { *m = Duration{} }
|
func (m *Duration) Reset() { *m = Duration{} }
|
||||||
func (m *Duration) String() string { return proto.CompactTextString(m) }
|
func (m *Duration) String() string { return proto.CompactTextString(m) }
|
||||||
func (*Duration) ProtoMessage() {}
|
func (*Duration) ProtoMessage() {}
|
||||||
func (*Duration) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
|
func (*Duration) Descriptor() ([]byte, []int) {
|
||||||
func (*Duration) XXX_WellKnownType() string { return "Duration" }
|
return fileDescriptor_duration_e7d612259e3f0613, []int{0}
|
||||||
|
}
|
||||||
|
func (*Duration) XXX_WellKnownType() string { return "Duration" }
|
||||||
|
func (m *Duration) XXX_Unmarshal(b []byte) error {
|
||||||
|
return xxx_messageInfo_Duration.Unmarshal(m, b)
|
||||||
|
}
|
||||||
|
func (m *Duration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||||
|
return xxx_messageInfo_Duration.Marshal(b, m, deterministic)
|
||||||
|
}
|
||||||
|
func (dst *Duration) XXX_Merge(src proto.Message) {
|
||||||
|
xxx_messageInfo_Duration.Merge(dst, src)
|
||||||
|
}
|
||||||
|
func (m *Duration) XXX_Size() int {
|
||||||
|
return xxx_messageInfo_Duration.Size(m)
|
||||||
|
}
|
||||||
|
func (m *Duration) XXX_DiscardUnknown() {
|
||||||
|
xxx_messageInfo_Duration.DiscardUnknown(m)
|
||||||
|
}
|
||||||
|
|
||||||
|
var xxx_messageInfo_Duration proto.InternalMessageInfo
|
||||||
|
|
||||||
func (m *Duration) GetSeconds() int64 {
|
func (m *Duration) GetSeconds() int64 {
|
||||||
if m != nil {
|
if m != nil {
|
||||||
@ -125,9 +138,11 @@ func init() {
|
|||||||
proto.RegisterType((*Duration)(nil), "google.protobuf.Duration")
|
proto.RegisterType((*Duration)(nil), "google.protobuf.Duration")
|
||||||
}
|
}
|
||||||
|
|
||||||
func init() { proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor0) }
|
func init() {
|
||||||
|
proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor_duration_e7d612259e3f0613)
|
||||||
|
}
|
||||||
|
|
||||||
var fileDescriptor0 = []byte{
|
var fileDescriptor_duration_e7d612259e3f0613 = []byte{
|
||||||
// 190 bytes of a gzipped FileDescriptorProto
|
// 190 bytes of a gzipped FileDescriptorProto
|
||||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f,
|
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f,
|
||||||
0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0x29, 0x2d, 0x4a,
|
0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0x29, 0x2d, 0x4a,
|
||||||
|
53
vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
generated
vendored
53
vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
generated
vendored
@ -1,16 +1,7 @@
|
|||||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||||
// source: google/protobuf/timestamp.proto
|
// source: google/protobuf/timestamp.proto
|
||||||
|
|
||||||
/*
|
package timestamp // import "github.com/golang/protobuf/ptypes/timestamp"
|
||||||
Package timestamp is a generated protocol buffer package.
|
|
||||||
|
|
||||||
It is generated from these files:
|
|
||||||
google/protobuf/timestamp.proto
|
|
||||||
|
|
||||||
It has these top-level messages:
|
|
||||||
Timestamp
|
|
||||||
*/
|
|
||||||
package timestamp
|
|
||||||
|
|
||||||
import proto "github.com/golang/protobuf/proto"
|
import proto "github.com/golang/protobuf/proto"
|
||||||
import fmt "fmt"
|
import fmt "fmt"
|
||||||
@ -101,7 +92,7 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
|
|||||||
// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime)
|
// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime)
|
||||||
// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one
|
// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one
|
||||||
// can use the Joda Time's [`ISODateTimeFormat.dateTime()`](
|
// can use the Joda Time's [`ISODateTimeFormat.dateTime()`](
|
||||||
// http://joda-time.sourceforge.net/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime())
|
// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime--)
|
||||||
// to obtain a formatter capable of generating timestamps in this format.
|
// to obtain a formatter capable of generating timestamps in this format.
|
||||||
//
|
//
|
||||||
//
|
//
|
||||||
@ -114,14 +105,36 @@ type Timestamp struct {
|
|||||||
// second values with fractions must still have non-negative nanos values
|
// second values with fractions must still have non-negative nanos values
|
||||||
// that count forward in time. Must be from 0 to 999,999,999
|
// that count forward in time. Must be from 0 to 999,999,999
|
||||||
// inclusive.
|
// inclusive.
|
||||||
Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"`
|
Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"`
|
||||||
|
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||||
|
XXX_unrecognized []byte `json:"-"`
|
||||||
|
XXX_sizecache int32 `json:"-"`
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *Timestamp) Reset() { *m = Timestamp{} }
|
func (m *Timestamp) Reset() { *m = Timestamp{} }
|
||||||
func (m *Timestamp) String() string { return proto.CompactTextString(m) }
|
func (m *Timestamp) String() string { return proto.CompactTextString(m) }
|
||||||
func (*Timestamp) ProtoMessage() {}
|
func (*Timestamp) ProtoMessage() {}
|
||||||
func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
|
func (*Timestamp) Descriptor() ([]byte, []int) {
|
||||||
func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" }
|
return fileDescriptor_timestamp_b826e8e5fba671a8, []int{0}
|
||||||
|
}
|
||||||
|
func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" }
|
||||||
|
func (m *Timestamp) XXX_Unmarshal(b []byte) error {
|
||||||
|
return xxx_messageInfo_Timestamp.Unmarshal(m, b)
|
||||||
|
}
|
||||||
|
func (m *Timestamp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||||
|
return xxx_messageInfo_Timestamp.Marshal(b, m, deterministic)
|
||||||
|
}
|
||||||
|
func (dst *Timestamp) XXX_Merge(src proto.Message) {
|
||||||
|
xxx_messageInfo_Timestamp.Merge(dst, src)
|
||||||
|
}
|
||||||
|
func (m *Timestamp) XXX_Size() int {
|
||||||
|
return xxx_messageInfo_Timestamp.Size(m)
|
||||||
|
}
|
||||||
|
func (m *Timestamp) XXX_DiscardUnknown() {
|
||||||
|
xxx_messageInfo_Timestamp.DiscardUnknown(m)
|
||||||
|
}
|
||||||
|
|
||||||
|
var xxx_messageInfo_Timestamp proto.InternalMessageInfo
|
||||||
|
|
||||||
func (m *Timestamp) GetSeconds() int64 {
|
func (m *Timestamp) GetSeconds() int64 {
|
||||||
if m != nil {
|
if m != nil {
|
||||||
@ -141,9 +154,11 @@ func init() {
|
|||||||
proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp")
|
proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp")
|
||||||
}
|
}
|
||||||
|
|
||||||
func init() { proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor0) }
|
func init() {
|
||||||
|
proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor_timestamp_b826e8e5fba671a8)
|
||||||
|
}
|
||||||
|
|
||||||
var fileDescriptor0 = []byte{
|
var fileDescriptor_timestamp_b826e8e5fba671a8 = []byte{
|
||||||
// 191 bytes of a gzipped FileDescriptorProto
|
// 191 bytes of a gzipped FileDescriptorProto
|
||||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4f, 0xcf, 0xcf, 0x4f,
|
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4f, 0xcf, 0xcf, 0x4f,
|
||||||
0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xc9, 0xcc, 0x4d,
|
0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xc9, 0xcc, 0x4d,
|
||||||
|
2
vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
generated
vendored
2
vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
generated
vendored
@ -114,7 +114,7 @@ option objc_class_prefix = "GPB";
|
|||||||
// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime)
|
// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime)
|
||||||
// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one
|
// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one
|
||||||
// can use the Joda Time's [`ISODateTimeFormat.dateTime()`](
|
// can use the Joda Time's [`ISODateTimeFormat.dateTime()`](
|
||||||
// http://joda-time.sourceforge.net/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime())
|
// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime--)
|
||||||
// to obtain a formatter capable of generating timestamps in this format.
|
// to obtain a formatter capable of generating timestamps in this format.
|
||||||
//
|
//
|
||||||
//
|
//
|
||||||
|
3
vendor/google.golang.org/grpc/README.md
generated
vendored
3
vendor/google.golang.org/grpc/README.md
generated
vendored
@ -16,8 +16,7 @@ $ go get -u google.golang.org/grpc
|
|||||||
Prerequisites
|
Prerequisites
|
||||||
-------------
|
-------------
|
||||||
|
|
||||||
This requires Go 1.6 or later. Go 1.7 will be required as of the next gRPC-Go
|
This requires Go 1.6 or later. Go 1.7 will be required soon.
|
||||||
release (1.8).
|
|
||||||
|
|
||||||
Constraints
|
Constraints
|
||||||
-----------
|
-----------
|
||||||
|
13
vendor/google.golang.org/grpc/balancer.go
generated
vendored
13
vendor/google.golang.org/grpc/balancer.go
generated
vendored
@ -32,7 +32,8 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
// Address represents a server the client connects to.
|
// Address represents a server the client connects to.
|
||||||
// This is the EXPERIMENTAL API and may be changed or extended in the future.
|
//
|
||||||
|
// Deprecated: please use package balancer.
|
||||||
type Address struct {
|
type Address struct {
|
||||||
// Addr is the server address on which a connection will be established.
|
// Addr is the server address on which a connection will be established.
|
||||||
Addr string
|
Addr string
|
||||||
@ -42,6 +43,8 @@ type Address struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// BalancerConfig specifies the configurations for Balancer.
|
// BalancerConfig specifies the configurations for Balancer.
|
||||||
|
//
|
||||||
|
// Deprecated: please use package balancer.
|
||||||
type BalancerConfig struct {
|
type BalancerConfig struct {
|
||||||
// DialCreds is the transport credential the Balancer implementation can
|
// DialCreds is the transport credential the Balancer implementation can
|
||||||
// use to dial to a remote load balancer server. The Balancer implementations
|
// use to dial to a remote load balancer server. The Balancer implementations
|
||||||
@ -54,7 +57,8 @@ type BalancerConfig struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// BalancerGetOptions configures a Get call.
|
// BalancerGetOptions configures a Get call.
|
||||||
// This is the EXPERIMENTAL API and may be changed or extended in the future.
|
//
|
||||||
|
// Deprecated: please use package balancer.
|
||||||
type BalancerGetOptions struct {
|
type BalancerGetOptions struct {
|
||||||
// BlockingWait specifies whether Get should block when there is no
|
// BlockingWait specifies whether Get should block when there is no
|
||||||
// connected address.
|
// connected address.
|
||||||
@ -62,7 +66,8 @@ type BalancerGetOptions struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Balancer chooses network addresses for RPCs.
|
// Balancer chooses network addresses for RPCs.
|
||||||
// This is the EXPERIMENTAL API and may be changed or extended in the future.
|
//
|
||||||
|
// Deprecated: please use package balancer.
|
||||||
type Balancer interface {
|
type Balancer interface {
|
||||||
// Start does the initialization work to bootstrap a Balancer. For example,
|
// Start does the initialization work to bootstrap a Balancer. For example,
|
||||||
// this function may start the name resolution and watch the updates. It will
|
// this function may start the name resolution and watch the updates. It will
|
||||||
@ -135,6 +140,8 @@ func downErrorf(timeout, temporary bool, format string, a ...interface{}) downEr
|
|||||||
|
|
||||||
// RoundRobin returns a Balancer that selects addresses round-robin. It uses r to watch
|
// RoundRobin returns a Balancer that selects addresses round-robin. It uses r to watch
|
||||||
// the name resolution updates and updates the addresses available correspondingly.
|
// the name resolution updates and updates the addresses available correspondingly.
|
||||||
|
//
|
||||||
|
// Deprecated: please use package balancer/roundrobin.
|
||||||
func RoundRobin(r naming.Resolver) Balancer {
|
func RoundRobin(r naming.Resolver) Balancer {
|
||||||
return &roundRobin{r: r}
|
return &roundRobin{r: r}
|
||||||
}
|
}
|
||||||
|
13
vendor/google.golang.org/grpc/balancer/balancer.go
generated
vendored
13
vendor/google.golang.org/grpc/balancer/balancer.go
generated
vendored
@ -36,9 +36,12 @@ var (
|
|||||||
m = make(map[string]Builder)
|
m = make(map[string]Builder)
|
||||||
)
|
)
|
||||||
|
|
||||||
// Register registers the balancer builder to the balancer map.
|
// Register registers the balancer builder to the balancer map. b.Name
|
||||||
// b.Name (lowercased) will be used as the name registered with
|
// (lowercased) will be used as the name registered with this builder.
|
||||||
// this builder.
|
//
|
||||||
|
// NOTE: this function must only be called during initialization time (i.e. in
|
||||||
|
// an init() function), and is not thread-safe. If multiple Balancers are
|
||||||
|
// registered with the same name, the one registered last will take effect.
|
||||||
func Register(b Builder) {
|
func Register(b Builder) {
|
||||||
m[strings.ToLower(b.Name())] = b
|
m[strings.ToLower(b.Name())] = b
|
||||||
}
|
}
|
||||||
@ -126,6 +129,8 @@ type BuildOptions struct {
|
|||||||
// to a remote load balancer server. The Balancer implementations
|
// to a remote load balancer server. The Balancer implementations
|
||||||
// can ignore this if it doesn't need to talk to remote balancer.
|
// can ignore this if it doesn't need to talk to remote balancer.
|
||||||
Dialer func(context.Context, string) (net.Conn, error)
|
Dialer func(context.Context, string) (net.Conn, error)
|
||||||
|
// ChannelzParentID is the entity parent's channelz unique identification number.
|
||||||
|
ChannelzParentID int64
|
||||||
}
|
}
|
||||||
|
|
||||||
// Builder creates a balancer.
|
// Builder creates a balancer.
|
||||||
@ -160,7 +165,7 @@ var (
|
|||||||
)
|
)
|
||||||
|
|
||||||
// Picker is used by gRPC to pick a SubConn to send an RPC.
|
// Picker is used by gRPC to pick a SubConn to send an RPC.
|
||||||
// Balancer is expected to generate a new picker from its snapshot everytime its
|
// Balancer is expected to generate a new picker from its snapshot every time its
|
||||||
// internal state has changed.
|
// internal state has changed.
|
||||||
//
|
//
|
||||||
// The pickers used by gRPC can be updated by ClientConn.UpdateBalancerState().
|
// The pickers used by gRPC can be updated by ClientConn.UpdateBalancerState().
|
||||||
|
1
vendor/google.golang.org/grpc/balancer/base/balancer.go
generated
vendored
1
vendor/google.golang.org/grpc/balancer/base/balancer.go
generated
vendored
@ -146,7 +146,6 @@ func (b *baseBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectiv
|
|||||||
}
|
}
|
||||||
|
|
||||||
b.cc.UpdateBalancerState(b.state, b.picker)
|
b.cc.UpdateBalancerState(b.state, b.picker)
|
||||||
return
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Close is a nop because base balancer doesn't have internal state to clean up,
|
// Close is a nop because base balancer doesn't have internal state to clean up,
|
||||||
|
2
vendor/google.golang.org/grpc/balancer_conn_wrappers.go
generated
vendored
2
vendor/google.golang.org/grpc/balancer_conn_wrappers.go
generated
vendored
@ -115,7 +115,7 @@ func newCCBalancerWrapper(cc *ClientConn, b balancer.Builder, bopts balancer.Bui
|
|||||||
return ccb
|
return ccb
|
||||||
}
|
}
|
||||||
|
|
||||||
// watcher balancer functions sequencially, so the balancer can be implemeneted
|
// watcher balancer functions sequentially, so the balancer can be implemented
|
||||||
// lock-free.
|
// lock-free.
|
||||||
func (ccb *ccBalancerWrapper) watcher() {
|
func (ccb *ccBalancerWrapper) watcher() {
|
||||||
for {
|
for {
|
||||||
|
3
vendor/google.golang.org/grpc/balancer_v1_wrapper.go
generated
vendored
3
vendor/google.golang.org/grpc/balancer_v1_wrapper.go
generated
vendored
@ -257,7 +257,6 @@ func (bw *balancerWrapper) HandleSubConnStateChange(sc balancer.SubConn, s conne
|
|||||||
// Remove state for this sc.
|
// Remove state for this sc.
|
||||||
delete(bw.connSt, sc)
|
delete(bw.connSt, sc)
|
||||||
}
|
}
|
||||||
return
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (bw *balancerWrapper) HandleResolvedAddrs([]resolver.Address, error) {
|
func (bw *balancerWrapper) HandleResolvedAddrs([]resolver.Address, error) {
|
||||||
@ -270,7 +269,6 @@ func (bw *balancerWrapper) HandleResolvedAddrs([]resolver.Address, error) {
|
|||||||
}
|
}
|
||||||
// There should be a resolver inside the balancer.
|
// There should be a resolver inside the balancer.
|
||||||
// All updates here, if any, are ignored.
|
// All updates here, if any, are ignored.
|
||||||
return
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (bw *balancerWrapper) Close() {
|
func (bw *balancerWrapper) Close() {
|
||||||
@ -282,7 +280,6 @@ func (bw *balancerWrapper) Close() {
|
|||||||
close(bw.startCh)
|
close(bw.startCh)
|
||||||
}
|
}
|
||||||
bw.balancer.Close()
|
bw.balancer.Close()
|
||||||
return
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// The picker is the balancerWrapper itself.
|
// The picker is the balancerWrapper itself.
|
||||||
|
23
vendor/google.golang.org/grpc/call.go
generated
vendored
23
vendor/google.golang.org/grpc/call.go
generated
vendored
@ -27,12 +27,31 @@ import (
 //
 // All errors returned by Invoke are compatible with the status package.
 func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply interface{}, opts ...CallOption) error {
+	// allow interceptor to see all applicable call options, which means those
+	// configured as defaults from dial option as well as per-call options
+	opts = combine(cc.dopts.callOptions, opts)
+
 	if cc.dopts.unaryInt != nil {
 		return cc.dopts.unaryInt(ctx, method, args, reply, cc, invoke, opts...)
 	}
 	return invoke(ctx, method, args, reply, cc, opts...)
 }
 
+func combine(o1 []CallOption, o2 []CallOption) []CallOption {
+	// we don't use append because o1 could have extra capacity whose
+	// elements would be overwritten, which could cause inadvertent
+	// sharing (and race connditions) between concurrent calls
+	if len(o1) == 0 {
+		return o2
+	} else if len(o2) == 0 {
+		return o1
+	}
+	ret := make([]CallOption, len(o1)+len(o2))
+	copy(ret, o1)
+	copy(ret[len(o1):], o2)
+	return ret
+}
+
 // Invoke sends the RPC request on the wire and returns after response is
 // received. This is typically called by generated code.
 //
@ -54,7 +73,7 @@ func invoke(ctx context.Context, method string, req, reply interface{}, cc *Clie
 	}
 	cs := csInt.(*clientStream)
 	if err := cs.SendMsg(req); err != nil {
-		if !cs.c.failFast && cs.s.Unprocessed() && firstAttempt {
+		if !cs.c.failFast && cs.attempt.s.Unprocessed() && firstAttempt {
 			// TODO: Add a field to header for grpc-transparent-retry-attempts
 			firstAttempt = false
 			continue
@ -62,7 +81,7 @@ func invoke(ctx context.Context, method string, req, reply interface{}, cc *Clie
 		return err
 	}
 	if err := cs.RecvMsg(reply); err != nil {
-		if !cs.c.failFast && cs.s.Unprocessed() && firstAttempt {
+		if !cs.c.failFast && cs.attempt.s.Unprocessed() && firstAttempt {
			// TODO: Add a field to header for grpc-transparent-retry-attempts
 			firstAttempt = false
 			continue
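
The combine helper added above exists to avoid slice aliasing between the per-dial default call options and the per-call options. As a minimal sketch (not part of the diff), the following standalone Go program shows the append hazard that combine sidesteps by always copying into a freshly allocated slice:

package main

import "fmt"

// Two appends to the same base slice with spare capacity share one backing
// array, so the second append can overwrite what the first one wrote. This is
// the "inadvertent sharing" the combine comment refers to.
func main() {
	base := make([]int, 1, 4) // len 1, cap 4: room to grow in place
	a := append(base, 10)     // writes 10 into base's backing array
	b := append(base, 20)     // reuses the same array, overwriting the 10
	fmt.Println(a[1], b[1])   // prints "20 20", not "10 20"
}
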
573 vendor/google.golang.org/grpc/channelz/funcs.go generated vendored Normal file
@ -0,0 +1,573 @@
|
|||||||
|
/*
|
||||||
|
*
|
||||||
|
* Copyright 2018 gRPC authors.
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
|
||||||
|
// Package channelz defines APIs for enabling channelz service, entry
|
||||||
|
// registration/deletion, and accessing channelz data. It also defines channelz
|
||||||
|
// metric struct formats.
|
||||||
|
//
|
||||||
|
// All APIs in this package are experimental.
|
||||||
|
package channelz
|
||||||
|
|
||||||
|
import (
|
||||||
|
"sort"
|
||||||
|
"sync"
|
||||||
|
"sync/atomic"
|
||||||
|
|
||||||
|
"google.golang.org/grpc/grpclog"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
db dbWrapper
|
||||||
|
idGen idGenerator
|
||||||
|
// EntryPerPage defines the number of channelz entries to be shown on a web page.
|
||||||
|
EntryPerPage = 50
|
||||||
|
curState int32
|
||||||
|
)
|
||||||
|
|
||||||
|
// TurnOn turns on channelz data collection.
|
||||||
|
func TurnOn() {
|
||||||
|
if !IsOn() {
|
||||||
|
NewChannelzStorage()
|
||||||
|
atomic.StoreInt32(&curState, 1)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsOn returns whether channelz data collection is on.
|
||||||
|
func IsOn() bool {
|
||||||
|
return atomic.CompareAndSwapInt32(&curState, 1, 1)
|
||||||
|
}
|
||||||
|
|
||||||
|
// dbWarpper wraps around a reference to internal channelz data storage, and
|
||||||
|
// provide synchronized functionality to set and get the reference.
|
||||||
|
type dbWrapper struct {
|
||||||
|
mu sync.RWMutex
|
||||||
|
DB *channelMap
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *dbWrapper) set(db *channelMap) {
|
||||||
|
d.mu.Lock()
|
||||||
|
d.DB = db
|
||||||
|
d.mu.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *dbWrapper) get() *channelMap {
|
||||||
|
d.mu.RLock()
|
||||||
|
defer d.mu.RUnlock()
|
||||||
|
return d.DB
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewChannelzStorage initializes channelz data storage and id generator.
|
||||||
|
//
|
||||||
|
// Note: This function is exported for testing purpose only. User should not call
|
||||||
|
// it in most cases.
|
||||||
|
func NewChannelzStorage() {
|
||||||
|
db.set(&channelMap{
|
||||||
|
topLevelChannels: make(map[int64]struct{}),
|
||||||
|
channels: make(map[int64]*channel),
|
||||||
|
listenSockets: make(map[int64]*listenSocket),
|
||||||
|
normalSockets: make(map[int64]*normalSocket),
|
||||||
|
servers: make(map[int64]*server),
|
||||||
|
subChannels: make(map[int64]*subChannel),
|
||||||
|
})
|
||||||
|
idGen.reset()
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetTopChannels returns a slice of top channel's ChannelMetric, along with a
|
||||||
|
// boolean indicating whether there's more top channels to be queried for.
|
||||||
|
//
|
||||||
|
// The arg id specifies that only top channel with id at or above it will be included
|
||||||
|
// in the result. The returned slice is up to a length of EntryPerPage, and is
|
||||||
|
// sorted in ascending id order.
|
||||||
|
func GetTopChannels(id int64) ([]*ChannelMetric, bool) {
|
||||||
|
return db.get().GetTopChannels(id)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetServers returns a slice of server's ServerMetric, along with a
|
||||||
|
// boolean indicating whether there's more servers to be queried for.
|
||||||
|
//
|
||||||
|
// The arg id specifies that only server with id at or above it will be included
|
||||||
|
// in the result. The returned slice is up to a length of EntryPerPage, and is
|
||||||
|
// sorted in ascending id order.
|
||||||
|
func GetServers(id int64) ([]*ServerMetric, bool) {
|
||||||
|
return db.get().GetServers(id)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetServerSockets returns a slice of server's (identified by id) normal socket's
|
||||||
|
// SocketMetric, along with a boolean indicating whether there's more sockets to
|
||||||
|
// be queried for.
|
||||||
|
//
|
||||||
|
// The arg startID specifies that only sockets with id at or above it will be
|
||||||
|
// included in the result. The returned slice is up to a length of EntryPerPage,
|
||||||
|
// and is sorted in ascending id order.
|
||||||
|
func GetServerSockets(id int64, startID int64) ([]*SocketMetric, bool) {
|
||||||
|
return db.get().GetServerSockets(id, startID)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetChannel returns the ChannelMetric for the channel (identified by id).
|
||||||
|
func GetChannel(id int64) *ChannelMetric {
|
||||||
|
return db.get().GetChannel(id)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetSubChannel returns the SubChannelMetric for the subchannel (identified by id).
|
||||||
|
func GetSubChannel(id int64) *SubChannelMetric {
|
||||||
|
return db.get().GetSubChannel(id)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetSocket returns the SocketInternalMetric for the socket (identified by id).
|
||||||
|
func GetSocket(id int64) *SocketMetric {
|
||||||
|
return db.get().GetSocket(id)
|
||||||
|
}
|
||||||
|
|
||||||
|
// RegisterChannel registers the given channel c in channelz database with ref
|
||||||
|
// as its reference name, and add it to the child list of its parent (identified
|
||||||
|
// by pid). pid = 0 means no parent. It returns the unique channelz tracking id
|
||||||
|
// assigned to this channel.
|
||||||
|
func RegisterChannel(c Channel, pid int64, ref string) int64 {
|
||||||
|
id := idGen.genID()
|
||||||
|
cn := &channel{
|
||||||
|
refName: ref,
|
||||||
|
c: c,
|
||||||
|
subChans: make(map[int64]string),
|
||||||
|
nestedChans: make(map[int64]string),
|
||||||
|
id: id,
|
||||||
|
pid: pid,
|
||||||
|
}
|
||||||
|
if pid == 0 {
|
||||||
|
db.get().addChannel(id, cn, true, pid, ref)
|
||||||
|
} else {
|
||||||
|
db.get().addChannel(id, cn, false, pid, ref)
|
||||||
|
}
|
||||||
|
return id
|
||||||
|
}
|
||||||
|
|
||||||
|
// RegisterSubChannel registers the given channel c in channelz database with ref
|
||||||
|
// as its reference name, and add it to the child list of its parent (identified
|
||||||
|
// by pid). It returns the unique channelz tracking id assigned to this subchannel.
|
||||||
|
func RegisterSubChannel(c Channel, pid int64, ref string) int64 {
|
||||||
|
if pid == 0 {
|
||||||
|
grpclog.Error("a SubChannel's parent id cannot be 0")
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
id := idGen.genID()
|
||||||
|
sc := &subChannel{
|
||||||
|
refName: ref,
|
||||||
|
c: c,
|
||||||
|
sockets: make(map[int64]string),
|
||||||
|
id: id,
|
||||||
|
pid: pid,
|
||||||
|
}
|
||||||
|
db.get().addSubChannel(id, sc, pid, ref)
|
||||||
|
return id
|
||||||
|
}
|
||||||
|
|
||||||
|
// RegisterServer registers the given server s in channelz database. It returns
|
||||||
|
// the unique channelz tracking id assigned to this server.
|
||||||
|
func RegisterServer(s Server, ref string) int64 {
|
||||||
|
id := idGen.genID()
|
||||||
|
svr := &server{
|
||||||
|
refName: ref,
|
||||||
|
s: s,
|
||||||
|
sockets: make(map[int64]string),
|
||||||
|
listenSockets: make(map[int64]string),
|
||||||
|
id: id,
|
||||||
|
}
|
||||||
|
db.get().addServer(id, svr)
|
||||||
|
return id
|
||||||
|
}
|
||||||
|
|
||||||
|
// RegisterListenSocket registers the given listen socket s in channelz database
|
||||||
|
// with ref as its reference name, and add it to the child list of its parent
|
||||||
|
// (identified by pid). It returns the unique channelz tracking id assigned to
|
||||||
|
// this listen socket.
|
||||||
|
func RegisterListenSocket(s Socket, pid int64, ref string) int64 {
|
||||||
|
if pid == 0 {
|
||||||
|
grpclog.Error("a ListenSocket's parent id cannot be 0")
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
id := idGen.genID()
|
||||||
|
ls := &listenSocket{refName: ref, s: s, id: id, pid: pid}
|
||||||
|
db.get().addListenSocket(id, ls, pid, ref)
|
||||||
|
return id
|
||||||
|
}
|
||||||
|
|
||||||
|
// RegisterNormalSocket registers the given normal socket s in channelz database
|
||||||
|
// with ref as its reference name, and add it to the child list of its parent
|
||||||
|
// (identified by pid). It returns the unique channelz tracking id assigned to
|
||||||
|
// this normal socket.
|
||||||
|
func RegisterNormalSocket(s Socket, pid int64, ref string) int64 {
|
||||||
|
if pid == 0 {
|
||||||
|
grpclog.Error("a NormalSocket's parent id cannot be 0")
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
id := idGen.genID()
|
||||||
|
ns := &normalSocket{refName: ref, s: s, id: id, pid: pid}
|
||||||
|
db.get().addNormalSocket(id, ns, pid, ref)
|
||||||
|
return id
|
||||||
|
}
|
||||||
|
|
||||||
|
// RemoveEntry removes an entry with unique channelz trakcing id to be id from
|
||||||
|
// channelz database.
|
||||||
|
func RemoveEntry(id int64) {
|
||||||
|
db.get().removeEntry(id)
|
||||||
|
}
|
||||||
|
|
||||||
|
// channelMap is the storage data structure for channelz.
|
||||||
|
// Methods of channelMap can be divided in two two categories with respect to locking.
|
||||||
|
// 1. Methods acquire the global lock.
|
||||||
|
// 2. Methods that can only be called when global lock is held.
|
||||||
|
// A second type of method need always to be called inside a first type of method.
|
||||||
|
type channelMap struct {
|
||||||
|
mu sync.RWMutex
|
||||||
|
topLevelChannels map[int64]struct{}
|
||||||
|
servers map[int64]*server
|
||||||
|
channels map[int64]*channel
|
||||||
|
subChannels map[int64]*subChannel
|
||||||
|
listenSockets map[int64]*listenSocket
|
||||||
|
normalSockets map[int64]*normalSocket
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *channelMap) addServer(id int64, s *server) {
|
||||||
|
c.mu.Lock()
|
||||||
|
s.cm = c
|
||||||
|
c.servers[id] = s
|
||||||
|
c.mu.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *channelMap) addChannel(id int64, cn *channel, isTopChannel bool, pid int64, ref string) {
|
||||||
|
c.mu.Lock()
|
||||||
|
cn.cm = c
|
||||||
|
c.channels[id] = cn
|
||||||
|
if isTopChannel {
|
||||||
|
c.topLevelChannels[id] = struct{}{}
|
||||||
|
} else {
|
||||||
|
c.findEntry(pid).addChild(id, cn)
|
||||||
|
}
|
||||||
|
c.mu.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *channelMap) addSubChannel(id int64, sc *subChannel, pid int64, ref string) {
|
||||||
|
c.mu.Lock()
|
||||||
|
sc.cm = c
|
||||||
|
c.subChannels[id] = sc
|
||||||
|
c.findEntry(pid).addChild(id, sc)
|
||||||
|
c.mu.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *channelMap) addListenSocket(id int64, ls *listenSocket, pid int64, ref string) {
|
||||||
|
c.mu.Lock()
|
||||||
|
ls.cm = c
|
||||||
|
c.listenSockets[id] = ls
|
||||||
|
c.findEntry(pid).addChild(id, ls)
|
||||||
|
c.mu.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *channelMap) addNormalSocket(id int64, ns *normalSocket, pid int64, ref string) {
|
||||||
|
c.mu.Lock()
|
||||||
|
ns.cm = c
|
||||||
|
c.normalSockets[id] = ns
|
||||||
|
c.findEntry(pid).addChild(id, ns)
|
||||||
|
c.mu.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
// removeEntry triggers the removal of an entry, which may not indeed delete the
|
||||||
|
// entry, if it has to wait on the deletion of its children, or may lead to a chain
|
||||||
|
// of entry deletion. For example, deleting the last socket of a gracefully shutting
|
||||||
|
// down server will lead to the server being also deleted.
|
||||||
|
func (c *channelMap) removeEntry(id int64) {
|
||||||
|
c.mu.Lock()
|
||||||
|
c.findEntry(id).triggerDelete()
|
||||||
|
c.mu.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
// c.mu must be held by the caller.
|
||||||
|
func (c *channelMap) findEntry(id int64) entry {
|
||||||
|
var v entry
|
||||||
|
var ok bool
|
||||||
|
if v, ok = c.channels[id]; ok {
|
||||||
|
return v
|
||||||
|
}
|
||||||
|
if v, ok = c.subChannels[id]; ok {
|
||||||
|
return v
|
||||||
|
}
|
||||||
|
if v, ok = c.servers[id]; ok {
|
||||||
|
return v
|
||||||
|
}
|
||||||
|
if v, ok = c.listenSockets[id]; ok {
|
||||||
|
return v
|
||||||
|
}
|
||||||
|
if v, ok = c.normalSockets[id]; ok {
|
||||||
|
return v
|
||||||
|
}
|
||||||
|
return &dummyEntry{idNotFound: id}
|
||||||
|
}
|
||||||
|
|
||||||
|
// c.mu must be held by the caller
|
||||||
|
// deleteEntry simply deletes an entry from the channelMap. Before calling this
|
||||||
|
// method, caller must check this entry is ready to be deleted, i.e removeEntry()
|
||||||
|
// has been called on it, and no children still exist.
|
||||||
|
// Conditionals are ordered by the expected frequency of deletion of each entity
|
||||||
|
// type, in order to optimize performance.
|
||||||
|
func (c *channelMap) deleteEntry(id int64) {
|
||||||
|
var ok bool
|
||||||
|
if _, ok = c.normalSockets[id]; ok {
|
||||||
|
delete(c.normalSockets, id)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if _, ok = c.subChannels[id]; ok {
|
||||||
|
delete(c.subChannels, id)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if _, ok = c.channels[id]; ok {
|
||||||
|
delete(c.channels, id)
|
||||||
|
delete(c.topLevelChannels, id)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if _, ok = c.listenSockets[id]; ok {
|
||||||
|
delete(c.listenSockets, id)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if _, ok = c.servers[id]; ok {
|
||||||
|
delete(c.servers, id)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type int64Slice []int64
|
||||||
|
|
||||||
|
func (s int64Slice) Len() int { return len(s) }
|
||||||
|
func (s int64Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
|
||||||
|
func (s int64Slice) Less(i, j int) bool { return s[i] < s[j] }
|
||||||
|
|
||||||
|
func copyMap(m map[int64]string) map[int64]string {
|
||||||
|
n := make(map[int64]string)
|
||||||
|
for k, v := range m {
|
||||||
|
n[k] = v
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
func min(a, b int) int {
|
||||||
|
if a < b {
|
||||||
|
return a
|
||||||
|
}
|
||||||
|
return b
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *channelMap) GetTopChannels(id int64) ([]*ChannelMetric, bool) {
|
||||||
|
c.mu.RLock()
|
||||||
|
l := len(c.topLevelChannels)
|
||||||
|
ids := make([]int64, 0, l)
|
||||||
|
cns := make([]*channel, 0, min(l, EntryPerPage))
|
||||||
|
|
||||||
|
for k := range c.topLevelChannels {
|
||||||
|
ids = append(ids, k)
|
||||||
|
}
|
||||||
|
sort.Sort(int64Slice(ids))
|
||||||
|
idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id })
|
||||||
|
count := 0
|
||||||
|
var end bool
|
||||||
|
var t []*ChannelMetric
|
||||||
|
for i, v := range ids[idx:] {
|
||||||
|
if count == EntryPerPage {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if cn, ok := c.channels[v]; ok {
|
||||||
|
cns = append(cns, cn)
|
||||||
|
t = append(t, &ChannelMetric{
|
||||||
|
NestedChans: copyMap(cn.nestedChans),
|
||||||
|
SubChans: copyMap(cn.subChans),
|
||||||
|
})
|
||||||
|
count++
|
||||||
|
}
|
||||||
|
if i == len(ids[idx:])-1 {
|
||||||
|
end = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
c.mu.RUnlock()
|
||||||
|
if count == 0 {
|
||||||
|
end = true
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, cn := range cns {
|
||||||
|
t[i].ChannelData = cn.c.ChannelzMetric()
|
||||||
|
t[i].ID = cn.id
|
||||||
|
t[i].RefName = cn.refName
|
||||||
|
}
|
||||||
|
return t, end
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *channelMap) GetServers(id int64) ([]*ServerMetric, bool) {
|
||||||
|
c.mu.RLock()
|
||||||
|
l := len(c.servers)
|
||||||
|
ids := make([]int64, 0, l)
|
||||||
|
ss := make([]*server, 0, min(l, EntryPerPage))
|
||||||
|
for k := range c.servers {
|
||||||
|
ids = append(ids, k)
|
||||||
|
}
|
||||||
|
sort.Sort(int64Slice(ids))
|
||||||
|
idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id })
|
||||||
|
count := 0
|
||||||
|
var end bool
|
||||||
|
var s []*ServerMetric
|
||||||
|
for i, v := range ids[idx:] {
|
||||||
|
if count == EntryPerPage {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if svr, ok := c.servers[v]; ok {
|
||||||
|
ss = append(ss, svr)
|
||||||
|
s = append(s, &ServerMetric{
|
||||||
|
ListenSockets: copyMap(svr.listenSockets),
|
||||||
|
})
|
||||||
|
count++
|
||||||
|
}
|
||||||
|
if i == len(ids[idx:])-1 {
|
||||||
|
end = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
c.mu.RUnlock()
|
||||||
|
if count == 0 {
|
||||||
|
end = true
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, svr := range ss {
|
||||||
|
s[i].ServerData = svr.s.ChannelzMetric()
|
||||||
|
s[i].ID = svr.id
|
||||||
|
s[i].RefName = svr.refName
|
||||||
|
}
|
||||||
|
return s, end
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *channelMap) GetServerSockets(id int64, startID int64) ([]*SocketMetric, bool) {
|
||||||
|
var svr *server
|
||||||
|
var ok bool
|
||||||
|
c.mu.RLock()
|
||||||
|
if svr, ok = c.servers[id]; !ok {
|
||||||
|
// server with id doesn't exist.
|
||||||
|
c.mu.RUnlock()
|
||||||
|
return nil, true
|
||||||
|
}
|
||||||
|
svrskts := svr.sockets
|
||||||
|
l := len(svrskts)
|
||||||
|
ids := make([]int64, 0, l)
|
||||||
|
sks := make([]*normalSocket, 0, min(l, EntryPerPage))
|
||||||
|
for k := range svrskts {
|
||||||
|
ids = append(ids, k)
|
||||||
|
}
|
||||||
|
sort.Sort((int64Slice(ids)))
|
||||||
|
idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id })
|
||||||
|
count := 0
|
||||||
|
var end bool
|
||||||
|
for i, v := range ids[idx:] {
|
||||||
|
if count == EntryPerPage {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if ns, ok := c.normalSockets[v]; ok {
|
||||||
|
sks = append(sks, ns)
|
||||||
|
count++
|
||||||
|
}
|
||||||
|
if i == len(ids[idx:])-1 {
|
||||||
|
end = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
c.mu.RUnlock()
|
||||||
|
if count == 0 {
|
||||||
|
end = true
|
||||||
|
}
|
||||||
|
var s []*SocketMetric
|
||||||
|
for _, ns := range sks {
|
||||||
|
sm := &SocketMetric{}
|
||||||
|
sm.SocketData = ns.s.ChannelzMetric()
|
||||||
|
sm.ID = ns.id
|
||||||
|
sm.RefName = ns.refName
|
||||||
|
s = append(s, sm)
|
||||||
|
}
|
||||||
|
return s, end
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *channelMap) GetChannel(id int64) *ChannelMetric {
|
||||||
|
cm := &ChannelMetric{}
|
||||||
|
var cn *channel
|
||||||
|
var ok bool
|
||||||
|
c.mu.RLock()
|
||||||
|
if cn, ok = c.channels[id]; !ok {
|
||||||
|
// channel with id doesn't exist.
|
||||||
|
c.mu.RUnlock()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
cm.NestedChans = copyMap(cn.nestedChans)
|
||||||
|
cm.SubChans = copyMap(cn.subChans)
|
||||||
|
c.mu.RUnlock()
|
||||||
|
cm.ChannelData = cn.c.ChannelzMetric()
|
||||||
|
cm.ID = cn.id
|
||||||
|
cm.RefName = cn.refName
|
||||||
|
return cm
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *channelMap) GetSubChannel(id int64) *SubChannelMetric {
|
||||||
|
cm := &SubChannelMetric{}
|
||||||
|
var sc *subChannel
|
||||||
|
var ok bool
|
||||||
|
c.mu.RLock()
|
||||||
|
if sc, ok = c.subChannels[id]; !ok {
|
||||||
|
// subchannel with id doesn't exist.
|
||||||
|
c.mu.RUnlock()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
cm.Sockets = copyMap(sc.sockets)
|
||||||
|
c.mu.RUnlock()
|
||||||
|
cm.ChannelData = sc.c.ChannelzMetric()
|
||||||
|
cm.ID = sc.id
|
||||||
|
cm.RefName = sc.refName
|
||||||
|
return cm
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *channelMap) GetSocket(id int64) *SocketMetric {
|
||||||
|
sm := &SocketMetric{}
|
||||||
|
c.mu.RLock()
|
||||||
|
if ls, ok := c.listenSockets[id]; ok {
|
||||||
|
c.mu.RUnlock()
|
||||||
|
sm.SocketData = ls.s.ChannelzMetric()
|
||||||
|
sm.ID = ls.id
|
||||||
|
sm.RefName = ls.refName
|
||||||
|
return sm
|
||||||
|
}
|
||||||
|
if ns, ok := c.normalSockets[id]; ok {
|
||||||
|
c.mu.RUnlock()
|
||||||
|
sm.SocketData = ns.s.ChannelzMetric()
|
||||||
|
sm.ID = ns.id
|
||||||
|
sm.RefName = ns.refName
|
||||||
|
return sm
|
||||||
|
}
|
||||||
|
c.mu.RUnlock()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type idGenerator struct {
|
||||||
|
id int64
|
||||||
|
}
|
||||||
|
|
||||||
|
func (i *idGenerator) reset() {
|
||||||
|
atomic.StoreInt64(&i.id, 0)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (i *idGenerator) genID() int64 {
|
||||||
|
return atomic.AddInt64(&i.id, 1)
|
||||||
|
}
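
The exported query functions above (GetTopChannels, GetServers, GetServerSockets) all page by id and return a boolean reporting whether the listing is exhausted. A minimal sketch of a consumer, assuming channelz has been turned on and using the import paths of this vendored copy:

package main

import (
	"google.golang.org/grpc/channelz"
	"google.golang.org/grpc/grpclog"
)

// dumpTopChannels walks every top-level channel, EntryPerPage entries at a
// time. GetTopChannels returns entries with id >= the argument in ascending
// order, so resuming from the last seen id + 1 visits each entry exactly once.
func dumpTopChannels() {
	var id int64
	for {
		metrics, end := channelz.GetTopChannels(id)
		for _, m := range metrics {
			grpclog.Infof("channel %d (%s): state=%v", m.ID, m.RefName, m.ChannelData.State)
			id = m.ID + 1
		}
		if end {
			return
		}
	}
}
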
418 vendor/google.golang.org/grpc/channelz/types.go generated vendored Normal file
@ -0,0 +1,418 @@
|
|||||||
|
/*
|
||||||
|
*
|
||||||
|
* Copyright 2018 gRPC authors.
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
|
||||||
|
package channelz
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"google.golang.org/grpc/connectivity"
|
||||||
|
"google.golang.org/grpc/grpclog"
|
||||||
|
)
|
||||||
|
|
||||||
|
// entry represents a node in the channelz database.
|
||||||
|
type entry interface {
|
||||||
|
// addChild adds a child e, whose channelz id is id to child list
|
||||||
|
addChild(id int64, e entry)
|
||||||
|
// deleteChild deletes a child with channelz id to be id from child list
|
||||||
|
deleteChild(id int64)
|
||||||
|
// triggerDelete tries to delete self from channelz database. However, if child
|
||||||
|
// list is not empty, then deletion from the database is on hold until the last
|
||||||
|
// child is deleted from database.
|
||||||
|
triggerDelete()
|
||||||
|
// deleteSelfIfReady check whether triggerDelete() has been called before, and whether child
|
||||||
|
// list is now empty. If both conditions are met, then delete self from database.
|
||||||
|
deleteSelfIfReady()
|
||||||
|
}
|
||||||
|
|
||||||
|
// dummyEntry is a fake entry to handle entry not found case.
|
||||||
|
type dummyEntry struct {
|
||||||
|
idNotFound int64
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *dummyEntry) addChild(id int64, e entry) {
|
||||||
|
// Note: It is possible for a normal program to reach here under race condition.
|
||||||
|
// For example, there could be a race between ClientConn.Close() info being propagated
|
||||||
|
// to addrConn and http2Client. ClientConn.Close() cancel the context and result
|
||||||
|
// in http2Client to error. The error info is then caught by transport monitor
|
||||||
|
// and before addrConn.tearDown() is called in side ClientConn.Close(). Therefore,
|
||||||
|
// the addrConn will create a new transport. And when registering the new transport in
|
||||||
|
// channelz, its parent addrConn could have already been torn down and deleted
|
||||||
|
// from channelz tracking, and thus reach the code here.
|
||||||
|
grpclog.Infof("attempt to add child of type %T with id %d to a parent (id=%d) that doesn't currently exist", e, id, d.idNotFound)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *dummyEntry) deleteChild(id int64) {
|
||||||
|
// It is possible for a normal program to reach here under race condition.
|
||||||
|
// Refer to the example described in addChild().
|
||||||
|
grpclog.Infof("attempt to delete child with id %d from a parent (id=%d) that doesn't currently exist", id, d.idNotFound)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *dummyEntry) triggerDelete() {
|
||||||
|
grpclog.Warningf("attempt to delete an entry (id=%d) that doesn't currently exist", d.idNotFound)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (*dummyEntry) deleteSelfIfReady() {
|
||||||
|
// code should not reach here. deleteSelfIfReady is always called on an existing entry.
|
||||||
|
}
|
||||||
|
|
||||||
|
// ChannelMetric defines the info channelz provides for a specific Channel, which
|
||||||
|
// includes ChannelInternalMetric and channelz-specific data, such as channelz id,
|
||||||
|
// child list, etc.
|
||||||
|
type ChannelMetric struct {
|
||||||
|
// ID is the channelz id of this channel.
|
||||||
|
ID int64
|
||||||
|
// RefName is the human readable reference string of this channel.
|
||||||
|
RefName string
|
||||||
|
// ChannelData contains channel internal metric reported by the channel through
|
||||||
|
// ChannelzMetric().
|
||||||
|
ChannelData *ChannelInternalMetric
|
||||||
|
// NestedChans tracks the nested channel type children of this channel in the format of
|
||||||
|
// a map from nested channel channelz id to corresponding reference string.
|
||||||
|
NestedChans map[int64]string
|
||||||
|
// SubChans tracks the subchannel type children of this channel in the format of a
|
||||||
|
// map from subchannel channelz id to corresponding reference string.
|
||||||
|
SubChans map[int64]string
|
||||||
|
// Sockets tracks the socket type children of this channel in the format of a map
|
||||||
|
// from socket channelz id to corresponding reference string.
|
||||||
|
// Note current grpc implementation doesn't allow channel having sockets directly,
|
||||||
|
// therefore, this is field is unused.
|
||||||
|
Sockets map[int64]string
|
||||||
|
}
|
||||||
|
|
||||||
|
// SubChannelMetric defines the info channelz provides for a specific SubChannel,
|
||||||
|
// which includes ChannelInternalMetric and channelz-specific data, such as
|
||||||
|
// channelz id, child list, etc.
|
||||||
|
type SubChannelMetric struct {
|
||||||
|
// ID is the channelz id of this subchannel.
|
||||||
|
ID int64
|
||||||
|
// RefName is the human readable reference string of this subchannel.
|
||||||
|
RefName string
|
||||||
|
// ChannelData contains subchannel internal metric reported by the subchannel
|
||||||
|
// through ChannelzMetric().
|
||||||
|
ChannelData *ChannelInternalMetric
|
||||||
|
// NestedChans tracks the nested channel type children of this subchannel in the format of
|
||||||
|
// a map from nested channel channelz id to corresponding reference string.
|
||||||
|
// Note current grpc implementation doesn't allow subchannel to have nested channels
|
||||||
|
// as children, therefore, this field is unused.
|
||||||
|
NestedChans map[int64]string
|
||||||
|
// SubChans tracks the subchannel type children of this subchannel in the format of a
|
||||||
|
// map from subchannel channelz id to corresponding reference string.
|
||||||
|
// Note current grpc implementation doesn't allow subchannel to have subchannels
|
||||||
|
// as children, therefore, this field is unused.
|
||||||
|
SubChans map[int64]string
|
||||||
|
// Sockets tracks the socket type children of this subchannel in the format of a map
|
||||||
|
// from socket channelz id to corresponding reference string.
|
||||||
|
Sockets map[int64]string
|
||||||
|
}
|
||||||
|
|
||||||
|
// ChannelInternalMetric defines the struct that the implementor of Channel interface
|
||||||
|
// should return from ChannelzMetric().
|
||||||
|
type ChannelInternalMetric struct {
|
||||||
|
// current connectivity state of the channel.
|
||||||
|
State connectivity.State
|
||||||
|
// The target this channel originally tried to connect to. May be absent
|
||||||
|
Target string
|
||||||
|
// The number of calls started on the channel.
|
||||||
|
CallsStarted int64
|
||||||
|
// The number of calls that have completed with an OK status.
|
||||||
|
CallsSucceeded int64
|
||||||
|
// The number of calls that have a completed with a non-OK status.
|
||||||
|
CallsFailed int64
|
||||||
|
// The last time a call was started on the channel.
|
||||||
|
LastCallStartedTimestamp time.Time
|
||||||
|
//TODO: trace
|
||||||
|
}
|
||||||
|
|
||||||
|
// Channel is the interface that should be satisfied in order to be tracked by
|
||||||
|
// channelz as Channel or SubChannel.
|
||||||
|
type Channel interface {
|
||||||
|
ChannelzMetric() *ChannelInternalMetric
|
||||||
|
}
|
||||||
|
|
||||||
|
type channel struct {
|
||||||
|
refName string
|
||||||
|
c Channel
|
||||||
|
closeCalled bool
|
||||||
|
nestedChans map[int64]string
|
||||||
|
subChans map[int64]string
|
||||||
|
id int64
|
||||||
|
pid int64
|
||||||
|
cm *channelMap
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *channel) addChild(id int64, e entry) {
|
||||||
|
switch v := e.(type) {
|
||||||
|
case *subChannel:
|
||||||
|
c.subChans[id] = v.refName
|
||||||
|
case *channel:
|
||||||
|
c.nestedChans[id] = v.refName
|
||||||
|
default:
|
||||||
|
grpclog.Errorf("cannot add a child (id = %d) of type %T to a channel", id, e)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *channel) deleteChild(id int64) {
|
||||||
|
delete(c.subChans, id)
|
||||||
|
delete(c.nestedChans, id)
|
||||||
|
c.deleteSelfIfReady()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *channel) triggerDelete() {
|
||||||
|
c.closeCalled = true
|
||||||
|
c.deleteSelfIfReady()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *channel) deleteSelfIfReady() {
|
||||||
|
if !c.closeCalled || len(c.subChans)+len(c.nestedChans) != 0 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
c.cm.deleteEntry(c.id)
|
||||||
|
// not top channel
|
||||||
|
if c.pid != 0 {
|
||||||
|
c.cm.findEntry(c.pid).deleteChild(c.id)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type subChannel struct {
|
||||||
|
refName string
|
||||||
|
c Channel
|
||||||
|
closeCalled bool
|
||||||
|
sockets map[int64]string
|
||||||
|
id int64
|
||||||
|
pid int64
|
||||||
|
cm *channelMap
|
||||||
|
}
|
||||||
|
|
||||||
|
func (sc *subChannel) addChild(id int64, e entry) {
|
||||||
|
if v, ok := e.(*normalSocket); ok {
|
||||||
|
sc.sockets[id] = v.refName
|
||||||
|
} else {
|
||||||
|
grpclog.Errorf("cannot add a child (id = %d) of type %T to a subChannel", id, e)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (sc *subChannel) deleteChild(id int64) {
|
||||||
|
delete(sc.sockets, id)
|
||||||
|
sc.deleteSelfIfReady()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (sc *subChannel) triggerDelete() {
|
||||||
|
sc.closeCalled = true
|
||||||
|
sc.deleteSelfIfReady()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (sc *subChannel) deleteSelfIfReady() {
|
||||||
|
if !sc.closeCalled || len(sc.sockets) != 0 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
sc.cm.deleteEntry(sc.id)
|
||||||
|
sc.cm.findEntry(sc.pid).deleteChild(sc.id)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SocketMetric defines the info channelz provides for a specific Socket, which
|
||||||
|
// includes SocketInternalMetric and channelz-specific data, such as channelz id, etc.
|
||||||
|
type SocketMetric struct {
|
||||||
|
// ID is the channelz id of this socket.
|
||||||
|
ID int64
|
||||||
|
// RefName is the human readable reference string of this socket.
|
||||||
|
RefName string
|
||||||
|
// SocketData contains socket internal metric reported by the socket through
|
||||||
|
// ChannelzMetric().
|
||||||
|
SocketData *SocketInternalMetric
|
||||||
|
}
|
||||||
|
|
||||||
|
// SocketInternalMetric defines the struct that the implementor of Socket interface
|
||||||
|
// should return from ChannelzMetric().
|
||||||
|
type SocketInternalMetric struct {
|
||||||
|
// The number of streams that have been started.
|
||||||
|
StreamsStarted int64
|
||||||
|
// The number of streams that have ended successfully:
|
||||||
|
// On client side, receiving frame with eos bit set.
|
||||||
|
// On server side, sending frame with eos bit set.
|
||||||
|
StreamsSucceeded int64
|
||||||
|
// The number of streams that have ended unsuccessfully:
|
||||||
|
// On client side, termination without receiving frame with eos bit set.
|
||||||
|
// On server side, termination without sending frame with eos bit set.
|
||||||
|
StreamsFailed int64
|
||||||
|
// The number of messages successfully sent on this socket.
|
||||||
|
MessagesSent int64
|
||||||
|
MessagesReceived int64
|
||||||
|
// The number of keep alives sent. This is typically implemented with HTTP/2
|
||||||
|
// ping messages.
|
||||||
|
KeepAlivesSent int64
|
||||||
|
// The last time a stream was created by this endpoint. Usually unset for
|
||||||
|
// servers.
|
||||||
|
LastLocalStreamCreatedTimestamp time.Time
|
||||||
|
// The last time a stream was created by the remote endpoint. Usually unset
|
||||||
|
// for clients.
|
||||||
|
LastRemoteStreamCreatedTimestamp time.Time
|
||||||
|
// The last time a message was sent by this endpoint.
|
||||||
|
LastMessageSentTimestamp time.Time
|
||||||
|
// The last time a message was received by this endpoint.
|
||||||
|
LastMessageReceivedTimestamp time.Time
|
||||||
|
// The amount of window, granted to the local endpoint by the remote endpoint.
|
||||||
|
// This may be slightly out of date due to network latency. This does NOT
|
||||||
|
// include stream level or TCP level flow control info.
|
||||||
|
LocalFlowControlWindow int64
|
||||||
|
// The amount of window, granted to the remote endpoint by the local endpoint.
|
||||||
|
// This may be slightly out of date due to network latency. This does NOT
|
||||||
|
// include stream level or TCP level flow control info.
|
||||||
|
RemoteFlowControlWindow int64
|
||||||
|
// The locally bound address.
|
||||||
|
LocalAddr net.Addr
|
||||||
|
// The remote bound address. May be absent.
|
||||||
|
RemoteAddr net.Addr
|
||||||
|
// Optional, represents the name of the remote endpoint, if different than
|
||||||
|
// the original target name.
|
||||||
|
RemoteName string
|
||||||
|
//TODO: socket options
|
||||||
|
//TODO: Security
|
||||||
|
}
|
||||||
|
|
||||||
|
// Socket is the interface that should be satisfied in order to be tracked by
|
||||||
|
// channelz as Socket.
|
||||||
|
type Socket interface {
|
||||||
|
ChannelzMetric() *SocketInternalMetric
|
||||||
|
}
|
||||||
|
|
||||||
|
type listenSocket struct {
|
||||||
|
refName string
|
||||||
|
s Socket
|
||||||
|
id int64
|
||||||
|
pid int64
|
||||||
|
cm *channelMap
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ls *listenSocket) addChild(id int64, e entry) {
|
||||||
|
grpclog.Errorf("cannot add a child (id = %d) of type %T to a listen socket", id, e)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ls *listenSocket) deleteChild(id int64) {
|
||||||
|
grpclog.Errorf("cannot delete a child (id = %d) from a listen socket", id)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ls *listenSocket) triggerDelete() {
|
||||||
|
ls.cm.deleteEntry(ls.id)
|
||||||
|
ls.cm.findEntry(ls.pid).deleteChild(ls.id)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ls *listenSocket) deleteSelfIfReady() {
|
||||||
|
grpclog.Errorf("cannot call deleteSelfIfReady on a listen socket")
|
||||||
|
}
|
||||||
|
|
||||||
|
type normalSocket struct {
|
||||||
|
refName string
|
||||||
|
s Socket
|
||||||
|
id int64
|
||||||
|
pid int64
|
||||||
|
cm *channelMap
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ns *normalSocket) addChild(id int64, e entry) {
|
||||||
|
grpclog.Errorf("cannot add a child (id = %d) of type %T to a normal socket", id, e)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ns *normalSocket) deleteChild(id int64) {
|
||||||
|
grpclog.Errorf("cannot delete a child (id = %d) from a normal socket", id)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ns *normalSocket) triggerDelete() {
|
||||||
|
ns.cm.deleteEntry(ns.id)
|
||||||
|
ns.cm.findEntry(ns.pid).deleteChild(ns.id)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ns *normalSocket) deleteSelfIfReady() {
|
||||||
|
grpclog.Errorf("cannot call deleteSelfIfReady on a normal socket")
|
||||||
|
}
|
||||||
|
|
||||||
|
// ServerMetric defines the info channelz provides for a specific Server, which
|
||||||
|
// includes ServerInternalMetric and channelz-specific data, such as channelz id,
|
||||||
|
// child list, etc.
|
||||||
|
type ServerMetric struct {
|
||||||
|
// ID is the channelz id of this server.
|
||||||
|
ID int64
|
||||||
|
// RefName is the human readable reference string of this server.
|
||||||
|
RefName string
|
||||||
|
// ServerData contains server internal metric reported by the server through
|
||||||
|
// ChannelzMetric().
|
||||||
|
ServerData *ServerInternalMetric
|
||||||
|
// ListenSockets tracks the listener socket type children of this server in the
|
||||||
|
// format of a map from socket channelz id to corresponding reference string.
|
||||||
|
ListenSockets map[int64]string
|
||||||
|
}
|
||||||
|
|
||||||
|
// ServerInternalMetric defines the struct that the implementor of Server interface
|
||||||
|
// should return from ChannelzMetric().
|
||||||
|
type ServerInternalMetric struct {
|
||||||
|
// The number of incoming calls started on the server.
|
||||||
|
CallsStarted int64
|
||||||
|
// The number of incoming calls that have completed with an OK status.
|
||||||
|
CallsSucceeded int64
|
||||||
|
// The number of incoming calls that have a completed with a non-OK status.
|
||||||
|
CallsFailed int64
|
||||||
|
// The last time a call was started on the server.
|
||||||
|
LastCallStartedTimestamp time.Time
|
||||||
|
//TODO: trace
|
||||||
|
}
|
||||||
|
|
||||||
|
// Server is the interface to be satisfied in order to be tracked by channelz as
|
||||||
|
// Server.
|
||||||
|
type Server interface {
|
||||||
|
ChannelzMetric() *ServerInternalMetric
|
||||||
|
}
|
||||||
|
|
||||||
|
type server struct {
|
||||||
|
refName string
|
||||||
|
s Server
|
||||||
|
closeCalled bool
|
||||||
|
sockets map[int64]string
|
||||||
|
listenSockets map[int64]string
|
||||||
|
id int64
|
||||||
|
cm *channelMap
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *server) addChild(id int64, e entry) {
|
||||||
|
switch v := e.(type) {
|
||||||
|
case *normalSocket:
|
||||||
|
s.sockets[id] = v.refName
|
||||||
|
case *listenSocket:
|
||||||
|
s.listenSockets[id] = v.refName
|
||||||
|
default:
|
||||||
|
grpclog.Errorf("cannot add a child (id = %d) of type %T to a server", id, e)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *server) deleteChild(id int64) {
|
||||||
|
delete(s.sockets, id)
|
||||||
|
delete(s.listenSockets, id)
|
||||||
|
s.deleteSelfIfReady()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *server) triggerDelete() {
|
||||||
|
s.closeCalled = true
|
||||||
|
s.deleteSelfIfReady()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *server) deleteSelfIfReady() {
|
||||||
|
if !s.closeCalled || len(s.sockets)+len(s.listenSockets) != 0 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
s.cm.deleteEntry(s.id)
|
||||||
|
}
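
The interfaces above (Channel, Socket, Server) are what an entity must satisfy to be tracked; the rest of this file is internal bookkeeping. A hypothetical, minimal implementation of Channel (the type name and target below are illustrative, not from the diff):

package main

import (
	"time"

	"google.golang.org/grpc/channelz"
	"google.golang.org/grpc/connectivity"
)

// fakeChannel satisfies channelz.Channel by returning a metric snapshot.
type fakeChannel struct {
	callsStarted int64
}

func (f *fakeChannel) ChannelzMetric() *channelz.ChannelInternalMetric {
	return &channelz.ChannelInternalMetric{
		State:                    connectivity.Ready,
		Target:                   "example.invalid:443",
		CallsStarted:             f.callsStarted,
		LastCallStartedTimestamp: time.Now(),
	}
}

func main() {
	channelz.TurnOn()
	id := channelz.RegisterChannel(&fakeChannel{}, 0, "fake") // pid 0 = top-level channel
	defer channelz.RemoveEntry(id)
	// The entry is now visible through channelz.GetChannel(id) and GetTopChannels.
}
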
247 vendor/google.golang.org/grpc/clientconn.go generated vendored
@ -32,6 +32,7 @@ import (
|
|||||||
"golang.org/x/net/trace"
|
"golang.org/x/net/trace"
|
||||||
"google.golang.org/grpc/balancer"
|
"google.golang.org/grpc/balancer"
|
||||||
_ "google.golang.org/grpc/balancer/roundrobin" // To register roundrobin.
|
_ "google.golang.org/grpc/balancer/roundrobin" // To register roundrobin.
|
||||||
|
"google.golang.org/grpc/channelz"
|
||||||
"google.golang.org/grpc/codes"
|
"google.golang.org/grpc/codes"
|
||||||
"google.golang.org/grpc/connectivity"
|
"google.golang.org/grpc/connectivity"
|
||||||
"google.golang.org/grpc/credentials"
|
"google.golang.org/grpc/credentials"
|
||||||
@ -45,6 +46,11 @@ import (
|
|||||||
"google.golang.org/grpc/transport"
|
"google.golang.org/grpc/transport"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
// minimum time to give a connection to complete
|
||||||
|
minConnectTimeout = 20 * time.Second
|
||||||
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
// ErrClientConnClosing indicates that the operation is illegal because
|
// ErrClientConnClosing indicates that the operation is illegal because
|
||||||
// the ClientConn is closing.
|
// the ClientConn is closing.
|
||||||
@ -60,8 +66,11 @@ var (
|
|||||||
errConnUnavailable = errors.New("grpc: the connection is unavailable")
|
errConnUnavailable = errors.New("grpc: the connection is unavailable")
|
||||||
// errBalancerClosed indicates that the balancer is closed.
|
// errBalancerClosed indicates that the balancer is closed.
|
||||||
errBalancerClosed = errors.New("grpc: balancer is closed")
|
errBalancerClosed = errors.New("grpc: balancer is closed")
|
||||||
// minimum time to give a connection to complete
|
// We use an accessor so that minConnectTimeout can be
|
||||||
minConnectTimeout = 20 * time.Second
|
// atomically read and updated while testing.
|
||||||
|
getMinConnectTimeout = func() time.Duration {
|
||||||
|
return minConnectTimeout
|
||||||
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
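
The minConnectTimeout value is now paired with a getMinConnectTimeout accessor so that the value can be substituted while testing without touching every call site. A sketch of the same pattern with hypothetical names (in the real code the test-side swap would additionally need to be synchronized):

package main

import "time"

var defaultTimeout = 20 * time.Second

// getTimeout is a package-level function variable; production code only ever
// calls it, so a test can replace it (and restore it afterwards) to shorten
// deadlines without changing the call sites.
var getTimeout = func() time.Duration { return defaultTimeout }

func connectDeadline(now time.Time) time.Time {
	return now.Add(getTimeout())
}
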
// The following errors are returned from Dial and DialContext
|
// The following errors are returned from Dial and DialContext
|
||||||
@ -99,8 +108,10 @@ type dialOptions struct {
|
|||||||
// balancer, and also by WithBalancerName dial option.
|
// balancer, and also by WithBalancerName dial option.
|
||||||
balancerBuilder balancer.Builder
|
balancerBuilder balancer.Builder
|
||||||
// This is to support grpclb.
|
// This is to support grpclb.
|
||||||
resolverBuilder resolver.Builder
|
resolverBuilder resolver.Builder
|
||||||
waitForHandshake bool
|
waitForHandshake bool
|
||||||
|
channelzParentID int64
|
||||||
|
disableServiceConfig bool
|
||||||
}
|
}
|
||||||
|
|
||||||
const (
|
const (
|
||||||
@ -108,6 +119,12 @@ const (
|
|||||||
defaultClientMaxSendMessageSize = math.MaxInt32
|
defaultClientMaxSendMessageSize = math.MaxInt32
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// RegisterChannelz turns on channelz service.
|
||||||
|
// This is an EXPERIMENTAL API.
|
||||||
|
func RegisterChannelz() {
|
||||||
|
channelz.TurnOn()
|
||||||
|
}
|
||||||
|
|
||||||
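
RegisterChannelz is the public switch for the channelz collection wired in throughout this file; the channelz.IsOn() checks below only register channels and subchannels when collection was already enabled. A minimal sketch, with a placeholder target:

package main

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/channelz"
)

func main() {
	// Enable collection before creating any ClientConn so DialContext
	// registers the new channel with channelz.
	grpc.RegisterChannelz()

	conn, err := grpc.Dial("example.invalid:443", grpc.WithInsecure())
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	// The dialed channel is now listed as a top-level channelz entry.
	if tops, _ := channelz.GetTopChannels(0); len(tops) > 0 {
		_ = tops[0].ID
	}
}
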
// DialOption configures how we set up the connection.
|
// DialOption configures how we set up the connection.
|
||||||
type DialOption func(*dialOptions)
|
type DialOption func(*dialOptions)
|
||||||
|
|
||||||
@ -152,7 +169,9 @@ func WithInitialConnWindowSize(s int32) DialOption {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// WithMaxMsgSize returns a DialOption which sets the maximum message size the client can receive. Deprecated: use WithDefaultCallOptions(MaxCallRecvMsgSize(s)) instead.
|
// WithMaxMsgSize returns a DialOption which sets the maximum message size the client can receive.
|
||||||
|
//
|
||||||
|
// Deprecated: use WithDefaultCallOptions(MaxCallRecvMsgSize(s)) instead.
|
||||||
func WithMaxMsgSize(s int) DialOption {
|
func WithMaxMsgSize(s int) DialOption {
|
||||||
return WithDefaultCallOptions(MaxCallRecvMsgSize(s))
|
return WithDefaultCallOptions(MaxCallRecvMsgSize(s))
|
||||||
}
|
}
|
||||||
@ -235,7 +254,8 @@ func withResolverBuilder(b resolver.Builder) DialOption {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// WithServiceConfig returns a DialOption which has a channel to read the service configuration.
|
// WithServiceConfig returns a DialOption which has a channel to read the service configuration.
|
||||||
// DEPRECATED: service config should be received through name resolver, as specified here.
|
//
|
||||||
|
// Deprecated: service config should be received through name resolver, as specified here.
|
||||||
// https://github.com/grpc/grpc/blob/master/doc/service_config.md
|
// https://github.com/grpc/grpc/blob/master/doc/service_config.md
|
||||||
func WithServiceConfig(c <-chan ServiceConfig) DialOption {
|
func WithServiceConfig(c <-chan ServiceConfig) DialOption {
|
||||||
return func(o *dialOptions) {
|
return func(o *dialOptions) {
|
||||||
@ -306,6 +326,7 @@ func WithPerRPCCredentials(creds credentials.PerRPCCredentials) DialOption {
|
|||||||
|
|
||||||
// WithTimeout returns a DialOption that configures a timeout for dialing a ClientConn
|
// WithTimeout returns a DialOption that configures a timeout for dialing a ClientConn
|
||||||
// initially. This is valid if and only if WithBlock() is present.
|
// initially. This is valid if and only if WithBlock() is present.
|
||||||
|
//
|
||||||
// Deprecated: use DialContext and context.WithTimeout instead.
|
// Deprecated: use DialContext and context.WithTimeout instead.
|
||||||
func WithTimeout(d time.Duration) DialOption {
|
func WithTimeout(d time.Duration) DialOption {
|
||||||
return func(o *dialOptions) {
|
return func(o *dialOptions) {
|
||||||
@ -388,15 +409,40 @@ func WithAuthority(a string) DialOption {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// WithChannelzParentID returns a DialOption that specifies the channelz ID of current ClientConn's
|
||||||
|
// parent. This function is used in nested channel creation (e.g. grpclb dial).
|
||||||
|
func WithChannelzParentID(id int64) DialOption {
|
||||||
|
return func(o *dialOptions) {
|
||||||
|
o.channelzParentID = id
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithDisableServiceConfig returns a DialOption that causes grpc to ignore any
|
||||||
|
// service config provided by the resolver and provides a hint to the resolver
|
||||||
|
// to not fetch service configs.
|
||||||
|
func WithDisableServiceConfig() DialOption {
|
||||||
|
return func(o *dialOptions) {
|
||||||
|
o.disableServiceConfig = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
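
Both new dial options above are plain setters on dialOptions. A short sketch of WithDisableServiceConfig in use (WithChannelzParentID is normally only passed by nested dials such as the grpclb channel); the target is a placeholder:

package main

import (
	"log"

	"google.golang.org/grpc"
)

func dialIgnoringServiceConfig() *grpc.ClientConn {
	// The resolver may still return a service config, but the ClientConn
	// ignores it when this option is set (see handleServiceConfig below).
	conn, err := grpc.Dial("example.invalid:443",
		grpc.WithInsecure(),
		grpc.WithDisableServiceConfig(),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	return conn
}
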
// Dial creates a client connection to the given target.
|
// Dial creates a client connection to the given target.
|
||||||
func Dial(target string, opts ...DialOption) (*ClientConn, error) {
|
func Dial(target string, opts ...DialOption) (*ClientConn, error) {
|
||||||
return DialContext(context.Background(), target, opts...)
|
return DialContext(context.Background(), target, opts...)
|
||||||
}
|
}
|
||||||
|
|
||||||
// DialContext creates a client connection to the given target. ctx can be used to
|
// DialContext creates a client connection to the given target. By default, it's
|
||||||
// cancel or expire the pending connection. Once this function returns, the
|
// a non-blocking dial (the function won't wait for connections to be
|
||||||
// cancellation and expiration of ctx will be noop. Users should call ClientConn.Close
|
// established, and connecting happens in the background). To make it a blocking
|
||||||
// to terminate all the pending operations after this function returns.
|
// dial, use WithBlock() dial option.
|
||||||
|
//
|
||||||
|
// In the non-blocking case, the ctx does not act against the connection. It
|
||||||
|
// only controls the setup steps.
|
||||||
|
//
|
||||||
|
// In the blocking case, ctx can be used to cancel or expire the pending
|
||||||
|
// connection. Once this function returns, the cancellation and expiration of
|
||||||
|
// ctx will be noop. Users should call ClientConn.Close to terminate all the
|
||||||
|
// pending operations after this function returns.
|
||||||
//
|
//
|
||||||
// The target name syntax is defined in
|
// The target name syntax is defined in
|
||||||
// https://github.com/grpc/grpc/blob/master/doc/naming.md.
|
// https://github.com/grpc/grpc/blob/master/doc/naming.md.
|
||||||
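
The rewritten comment distinguishes the default non-blocking dial from a blocking one. A sketch of the blocking form, where the context only bounds connection setup (target and timeout are placeholders):

package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
)

func blockingDial(target string) *grpc.ClientConn {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	conn, err := grpc.DialContext(ctx, target,
		grpc.WithInsecure(),
		grpc.WithBlock(), // without WithBlock, DialContext returns immediately
	)
	if err != nil {
		log.Fatalf("dial %s: %v", target, err)
	}
	// Once DialContext has returned, cancelling ctx no longer affects conn;
	// release it with conn.Close when done.
	return conn
}
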
@ -415,6 +461,14 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
|
|||||||
opt(&cc.dopts)
|
opt(&cc.dopts)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if channelz.IsOn() {
|
||||||
|
if cc.dopts.channelzParentID != 0 {
|
||||||
|
cc.channelzID = channelz.RegisterChannel(cc, cc.dopts.channelzParentID, target)
|
||||||
|
} else {
|
||||||
|
cc.channelzID = channelz.RegisterChannel(cc, 0, target)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
if !cc.dopts.insecure {
|
if !cc.dopts.insecure {
|
||||||
if cc.dopts.copts.TransportCredentials == nil {
|
if cc.dopts.copts.TransportCredentials == nil {
|
||||||
return nil, errNoTransportSecurity
|
return nil, errNoTransportSecurity
|
||||||
@ -435,7 +489,8 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
|
|||||||
if cc.dopts.copts.Dialer == nil {
|
if cc.dopts.copts.Dialer == nil {
|
||||||
cc.dopts.copts.Dialer = newProxyDialer(
|
cc.dopts.copts.Dialer = newProxyDialer(
|
||||||
func(ctx context.Context, addr string) (net.Conn, error) {
|
func(ctx context.Context, addr string) (net.Conn, error) {
|
||||||
return dialContext(ctx, "tcp", addr)
|
network, addr := parseDialTarget(addr)
|
||||||
|
return dialContext(ctx, network, addr)
|
||||||
},
|
},
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
@ -529,8 +584,9 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
|
|||||||
credsClone = creds.Clone()
|
credsClone = creds.Clone()
|
||||||
}
|
}
|
||||||
cc.balancerBuildOpts = balancer.BuildOptions{
|
cc.balancerBuildOpts = balancer.BuildOptions{
|
||||||
DialCreds: credsClone,
|
DialCreds: credsClone,
|
||||||
Dialer: cc.dopts.copts.Dialer,
|
Dialer: cc.dopts.copts.Dialer,
|
||||||
|
ChannelzParentID: cc.channelzID,
|
||||||
}
|
}
|
||||||
|
|
||||||
// Build the resolver.
|
// Build the resolver.
|
||||||
@ -632,6 +688,13 @@ type ClientConn struct {
|
|||||||
preBalancerName string // previous balancer name.
|
preBalancerName string // previous balancer name.
|
||||||
curAddresses []resolver.Address
|
curAddresses []resolver.Address
|
||||||
balancerWrapper *ccBalancerWrapper
|
balancerWrapper *ccBalancerWrapper
|
||||||
|
|
||||||
|
channelzID int64 // channelz unique identification number
|
||||||
|
czmu sync.RWMutex
|
||||||
|
callsStarted int64
|
||||||
|
callsSucceeded int64
|
||||||
|
callsFailed int64
|
||||||
|
lastCallStartedTime time.Time
|
||||||
}
|
}
|
||||||
|
|
||||||
// WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or
|
// WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or
|
||||||
@ -756,6 +819,8 @@ func (cc *ClientConn) switchBalancer(name string) {
|
|||||||
if cc.balancerWrapper != nil {
|
if cc.balancerWrapper != nil {
|
||||||
cc.balancerWrapper.close()
|
cc.balancerWrapper.close()
|
||||||
}
|
}
|
||||||
|
// Clear all stickiness state.
|
||||||
|
cc.blockingpicker.clearStickinessState()
|
||||||
|
|
||||||
builder := balancer.Get(name)
|
builder := balancer.Get(name)
|
||||||
if builder == nil {
|
if builder == nil {
|
||||||
@ -795,6 +860,9 @@ func (cc *ClientConn) newAddrConn(addrs []resolver.Address) (*addrConn, error) {
|
|||||||
cc.mu.Unlock()
|
cc.mu.Unlock()
|
||||||
return nil, ErrClientConnClosing
|
return nil, ErrClientConnClosing
|
||||||
}
|
}
|
||||||
|
if channelz.IsOn() {
|
||||||
|
ac.channelzID = channelz.RegisterSubChannel(ac, cc.channelzID, "")
|
||||||
|
}
|
||||||
cc.conns[ac] = struct{}{}
|
cc.conns[ac] = struct{}{}
|
||||||
cc.mu.Unlock()
|
cc.mu.Unlock()
|
||||||
return ac, nil
|
return ac, nil
|
||||||
@ -813,6 +881,42 @@ func (cc *ClientConn) removeAddrConn(ac *addrConn, err error) {
|
|||||||
ac.tearDown(err)
|
ac.tearDown(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ChannelzMetric returns ChannelInternalMetric of current ClientConn.
|
||||||
|
// This is an EXPERIMENTAL API.
|
||||||
|
+func (cc *ClientConn) ChannelzMetric() *channelz.ChannelInternalMetric {
+	state := cc.GetState()
+	cc.czmu.RLock()
+	defer cc.czmu.RUnlock()
+	return &channelz.ChannelInternalMetric{
+		State:                    state,
+		Target:                   cc.target,
+		CallsStarted:             cc.callsStarted,
+		CallsSucceeded:           cc.callsSucceeded,
+		CallsFailed:              cc.callsFailed,
+		LastCallStartedTimestamp: cc.lastCallStartedTime,
+	}
+}
+
+func (cc *ClientConn) incrCallsStarted() {
+	cc.czmu.Lock()
+	cc.callsStarted++
+	// TODO(yuxuanli): will make this a time.Time pointer improve performance?
+	cc.lastCallStartedTime = time.Now()
+	cc.czmu.Unlock()
+}
+
+func (cc *ClientConn) incrCallsSucceeded() {
+	cc.czmu.Lock()
+	cc.callsSucceeded++
+	cc.czmu.Unlock()
+}
+
+func (cc *ClientConn) incrCallsFailed() {
+	cc.czmu.Lock()
+	cc.callsFailed++
+	cc.czmu.Unlock()
+}
+
 // connect starts to creating transport and also starts the transport monitor
 // goroutine for this ac.
 // It does nothing if the ac is not IDLE.
@@ -883,7 +987,7 @@ func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool {
 // the corresponding MethodConfig.
 // If there isn't an exact match for the input method, we look for the default config
 // under the service (i.e /service/). If there is a default MethodConfig for
-// the serivce, we return it.
+// the service, we return it.
 // Otherwise, we return an empty MethodConfig.
 func (cc *ClientConn) GetMethodConfig(method string) MethodConfig {
 	// TODO: Avoid the locking here.
@@ -892,7 +996,7 @@ func (cc *ClientConn) GetMethodConfig(method string) MethodConfig {
 	m, ok := cc.sc.Methods[method]
 	if !ok {
 		i := strings.LastIndex(method, "/")
-		m, _ = cc.sc.Methods[method[:i+1]]
+		m = cc.sc.Methods[method[:i+1]]
 	}
 	return m
 }
@@ -908,6 +1012,9 @@ func (cc *ClientConn) getTransport(ctx context.Context, failfast bool) (transpor
 // handleServiceConfig parses the service config string in JSON format to Go native
 // struct ServiceConfig, and store both the struct and the JSON string in ClientConn.
 func (cc *ClientConn) handleServiceConfig(js string) error {
+	if cc.dopts.disableServiceConfig {
+		return nil
+	}
 	sc, err := parseServiceConfig(js)
 	if err != nil {
 		return err
@@ -928,6 +1035,18 @@ func (cc *ClientConn) handleServiceConfig(js string) error {
 			cc.balancerWrapper.handleResolvedAddrs(cc.curAddresses, nil)
 		}
 	}
+
+	if envConfigStickinessOn {
+		var newStickinessMDKey string
+		if sc.stickinessMetadataKey != nil && *sc.stickinessMetadataKey != "" {
+			newStickinessMDKey = *sc.stickinessMetadataKey
+		}
+		// newStickinessMDKey is "" if one of the following happens:
+		//  - stickinessMetadataKey is set to ""
+		//  - stickinessMetadataKey field doesn't exist in service config
+		cc.blockingpicker.updateStickinessMDKey(strings.ToLower(newStickinessMDKey))
+	}
+
 	cc.mu.Unlock()
 	return nil
 }
@@ -944,7 +1063,7 @@ func (cc *ClientConn) resolveNow(o resolver.ResolveNowOption) {

 // Close tears down the ClientConn and all underlying connections.
 func (cc *ClientConn) Close() error {
-	cc.cancel()
+	defer cc.cancel()

 	cc.mu.Lock()
 	if cc.conns == nil {
@@ -960,16 +1079,22 @@ func (cc *ClientConn) Close() error {
 	bWrapper := cc.balancerWrapper
 	cc.balancerWrapper = nil
 	cc.mu.Unlock()

 	cc.blockingpicker.close()

 	if rWrapper != nil {
 		rWrapper.close()
 	}
 	if bWrapper != nil {
 		bWrapper.close()
 	}

 	for ac := range conns {
 		ac.tearDown(ErrClientConnClosing)
 	}
+	if channelz.IsOn() {
+		channelz.RemoveEntry(cc.channelzID)
+	}
 	return nil
 }

@@ -1003,6 +1128,13 @@ type addrConn struct {
 	// connectDeadline is the time by which all connection
 	// negotiations must complete.
 	connectDeadline time.Time
+
+	channelzID int64 // channelz unique identification number
+	czmu       sync.RWMutex
+	callsStarted        int64
+	callsSucceeded      int64
+	callsFailed         int64
+	lastCallStartedTime time.Time
 }

 // adjustParams updates parameters used to create transports upon
@@ -1038,7 +1170,7 @@ func (ac *addrConn) errorf(format string, a ...interface{}) {
 // resetTransport recreates a transport to the address for ac. The old
 // transport will close itself on error or when the clientconn is closed.
 // The created transport must receive initial settings frame from the server.
-// In case that doesnt happen, transportMonitor will kill the newly created
+// In case that doesn't happen, transportMonitor will kill the newly created
 // transport after connectDeadline has expired.
 // In case there was an error on the transport before the settings frame was
 // received, resetTransport resumes connecting to backends after the one that
@@ -1073,7 +1205,7 @@ func (ac *addrConn) resetTransport() error {
 		// connection.
 		backoffFor := ac.dopts.bs.backoff(connectRetryNum) // time.Duration.
 		// This will be the duration that dial gets to finish.
-		dialDuration := minConnectTimeout
+		dialDuration := getMinConnectTimeout()
 		if backoffFor > dialDuration {
 			// Give dial more time as we keep failing to connect.
 			dialDuration = backoffFor
@@ -1083,7 +1215,7 @@ func (ac *addrConn) resetTransport() error {
 		connectDeadline = start.Add(dialDuration)
 		ridx = 0 // Start connecting from the beginning.
 	} else {
-		// Continue trying to conect with the same deadlines.
+		// Continue trying to connect with the same deadlines.
 		connectRetryNum = ac.connectRetryNum
 		backoffDeadline = ac.backoffDeadline
 		connectDeadline = ac.connectDeadline
@@ -1144,18 +1276,13 @@ func (ac *addrConn) createTransport(connectRetryNum, ridx int, backoffDeadline,
 	// Do not cancel in the success path because of
 	// this issue in Go1.6: https://github.com/golang/go/issues/15078.
 	connectCtx, cancel := context.WithDeadline(ac.ctx, connectDeadline)
+	if channelz.IsOn() {
+		copts.ChannelzParentID = ac.channelzID
+	}
 	newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, target, copts, onPrefaceReceipt)
 	if err != nil {
 		cancel()
-		if e, ok := err.(transport.ConnectionError); ok && !e.Temporary() {
-			ac.mu.Lock()
-			if ac.state != connectivity.Shutdown {
-				ac.state = connectivity.TransientFailure
-				ac.cc.handleSubConnStateChange(ac.acbw, ac.state)
-			}
-			ac.mu.Unlock()
-			return false, err
-		}
+		ac.cc.blockingpicker.updateConnectionError(err)
 		ac.mu.Lock()
 		if ac.state == connectivity.Shutdown {
 			// ac.tearDown(...) has been invoked.
@@ -1207,6 +1334,10 @@ func (ac *addrConn) createTransport(connectRetryNum, ridx int, backoffDeadline,
 		return true, nil
 	}
 	ac.mu.Lock()
+	if ac.state == connectivity.Shutdown {
+		ac.mu.Unlock()
+		return false, errConnClosing
+	}
 	ac.state = connectivity.TransientFailure
 	ac.cc.handleSubConnStateChange(ac.acbw, ac.state)
 	ac.cc.resolveNow(resolver.ResolveNowOption{})
@@ -1241,7 +1372,20 @@ func (ac *addrConn) transportMonitor() {
 		// Block until we receive a goaway or an error occurs.
 		select {
 		case <-t.GoAway():
+			done := t.Error()
+			cleanup := t.Close
+			// Since this transport will be orphaned (won't have a transportMonitor)
+			// we need to launch a goroutine to keep track of clientConn.Close()
+			// happening since it might not be noticed by any other goroutine for a while.
+			go func() {
+				<-done
+				cleanup()
+			}()
 		case <-t.Error():
+			// In case this is triggered because clientConn.Close()
+			// was called, we want to immeditately close the transport
+			// since no other goroutine might notice it for a while.
+			t.Close()
 		case <-cdeadline:
 			ac.mu.Lock()
 			// This implies that client received server preface.
@@ -1385,7 +1529,9 @@ func (ac *addrConn) tearDown(err error) {
 		close(ac.ready)
 		ac.ready = nil
 	}
-	return
+	if channelz.IsOn() {
+		channelz.RemoveEntry(ac.channelzID)
+	}
 }

 func (ac *addrConn) getState() connectivity.State {
@@ -1394,6 +1540,49 @@ func (ac *addrConn) getState() connectivity.State {
 	return ac.state
 }

+func (ac *addrConn) getCurAddr() (ret resolver.Address) {
+	ac.mu.Lock()
+	ret = ac.curAddr
+	ac.mu.Unlock()
+	return
+}
+
+func (ac *addrConn) ChannelzMetric() *channelz.ChannelInternalMetric {
+	ac.mu.Lock()
+	addr := ac.curAddr.Addr
+	ac.mu.Unlock()
+	state := ac.getState()
+	ac.czmu.RLock()
+	defer ac.czmu.RUnlock()
+	return &channelz.ChannelInternalMetric{
+		State:                    state,
+		Target:                   addr,
+		CallsStarted:             ac.callsStarted,
+		CallsSucceeded:           ac.callsSucceeded,
+		CallsFailed:              ac.callsFailed,
+		LastCallStartedTimestamp: ac.lastCallStartedTime,
+	}
+}
+
+func (ac *addrConn) incrCallsStarted() {
+	ac.czmu.Lock()
+	ac.callsStarted++
+	ac.lastCallStartedTime = time.Now()
+	ac.czmu.Unlock()
+}
+
+func (ac *addrConn) incrCallsSucceeded() {
+	ac.czmu.Lock()
+	ac.callsSucceeded++
+	ac.czmu.Unlock()
+}
+
+func (ac *addrConn) incrCallsFailed() {
+	ac.czmu.Lock()
+	ac.callsFailed++
+	ac.czmu.Unlock()
+}
+
 // ErrClientConnTimeout indicates that the ClientConn cannot establish the
 // underlying connections within the specified timeout.
 //
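The channelz additions above boil down to a counters-behind-an-RWMutex pattern: increments take the write lock, a metrics snapshot takes the read lock. A minimal standalone Go sketch of that pattern, with illustrative names only (this is not the grpc implementation):

package main

import (
	"fmt"
	"sync"
	"time"
)

// callMetrics mirrors the shape of the counters added to ClientConn/addrConn above.
type callMetrics struct {
	mu                  sync.RWMutex
	callsStarted        int64
	callsSucceeded      int64
	callsFailed         int64
	lastCallStartedTime time.Time
}

func (m *callMetrics) incrCallsStarted() {
	m.mu.Lock()
	m.callsStarted++
	m.lastCallStartedTime = time.Now()
	m.mu.Unlock()
}

func (m *callMetrics) incrCallsSucceeded() {
	m.mu.Lock()
	m.callsSucceeded++
	m.mu.Unlock()
}

// snapshot returns a consistent copy of the counters under the read lock,
// so metrics can be scraped while calls keep updating them.
func (m *callMetrics) snapshot() (started, succeeded, failed int64) {
	m.mu.RLock()
	defer m.mu.RUnlock()
	return m.callsStarted, m.callsSucceeded, m.callsFailed
}

func main() {
	var m callMetrics
	m.incrCallsStarted()
	m.incrCallsSucceeded()
	started, succeeded, failed := m.snapshot()
	fmt.Println(started, succeeded, failed) // 1 1 0
}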
2	vendor/google.golang.org/grpc/encoding/encoding.go (generated, vendored)
@@ -82,7 +82,7 @@ type Codec interface {
 	Name() string
 }

-var registeredCodecs = make(map[string]Codec, 0)
+var registeredCodecs = make(map[string]Codec)

 // RegisterCodec registers the provided Codec for use with all gRPC clients and
 // servers.
37	vendor/google.golang.org/grpc/envconfig.go (generated, vendored, new file)
@@ -0,0 +1,37 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+	"os"
+	"strings"
+)
+
+const (
+	envConfigPrefix        = "GRPC_GO_"
+	envConfigStickinessStr = envConfigPrefix + "STICKINESS"
+)
+
+var (
+	envConfigStickinessOn bool
+)
+
+func init() {
+	envConfigStickinessOn = strings.EqualFold(os.Getenv(envConfigStickinessStr), "on")
+}
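The new envconfig.go gates stickiness on the GRPC_GO_STICKINESS environment variable, read once at init and compared case-insensitively against "on". A minimal standalone sketch of that check (illustrative only, outside the grpc package):

package main

import (
	"fmt"
	"os"
	"strings"
)

// stickinessEnabled mirrors the init() in envconfig.go: only the value "on"
// (in any case) enables the feature; anything else leaves it off.
func stickinessEnabled() bool {
	return strings.EqualFold(os.Getenv("GRPC_GO_STICKINESS"), "on")
}

func main() {
	os.Setenv("GRPC_GO_STICKINESS", "On")
	fmt.Println(stickinessEnabled()) // true
	os.Setenv("GRPC_GO_STICKINESS", "off")
	fmt.Println(stickinessEnabled()) // false
}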
29	vendor/google.golang.org/grpc/go16.go (generated, vendored)
@@ -25,7 +25,6 @@ import (
 	"io"
 	"net"
 	"net/http"
-	"os"

 	"golang.org/x/net/context"
 	"google.golang.org/grpc/codes"
@@ -69,31 +68,3 @@ func toRPCErr(err error) error {
 	}
 	return status.Error(codes.Unknown, err.Error())
 }
-
-// convertCode converts a standard Go error into its canonical code. Note that
-// this is only used to translate the error returned by the server applications.
-func convertCode(err error) codes.Code {
-	switch err {
-	case nil:
-		return codes.OK
-	case io.EOF:
-		return codes.OutOfRange
-	case io.ErrClosedPipe, io.ErrNoProgress, io.ErrShortBuffer, io.ErrShortWrite, io.ErrUnexpectedEOF:
-		return codes.FailedPrecondition
-	case os.ErrInvalid:
-		return codes.InvalidArgument
-	case context.Canceled:
-		return codes.Canceled
-	case context.DeadlineExceeded:
-		return codes.DeadlineExceeded
-	}
-	switch {
-	case os.IsExist(err):
-		return codes.AlreadyExists
-	case os.IsNotExist(err):
-		return codes.NotFound
-	case os.IsPermission(err):
-		return codes.PermissionDenied
-	}
-	return codes.Unknown
-}
29	vendor/google.golang.org/grpc/go17.go (generated, vendored)
@@ -26,7 +26,6 @@ import (
 	"io"
 	"net"
 	"net/http"
-	"os"

 	netctx "golang.org/x/net/context"
 	"google.golang.org/grpc/codes"
@@ -70,31 +69,3 @@ func toRPCErr(err error) error {
 	}
 	return status.Error(codes.Unknown, err.Error())
 }
-
-// convertCode converts a standard Go error into its canonical code. Note that
-// this is only used to translate the error returned by the server applications.
-func convertCode(err error) codes.Code {
-	switch err {
-	case nil:
-		return codes.OK
-	case io.EOF:
-		return codes.OutOfRange
-	case io.ErrClosedPipe, io.ErrNoProgress, io.ErrShortBuffer, io.ErrShortWrite, io.ErrUnexpectedEOF:
-		return codes.FailedPrecondition
-	case os.ErrInvalid:
-		return codes.InvalidArgument
-	case context.Canceled, netctx.Canceled:
-		return codes.Canceled
-	case context.DeadlineExceeded, netctx.DeadlineExceeded:
-		return codes.DeadlineExceeded
-	}
-	switch {
-	case os.IsExist(err):
-		return codes.AlreadyExists
-	case os.IsNotExist(err):
-		return codes.NotFound
-	case os.IsPermission(err):
-		return codes.PermissionDenied
-	}
-	return codes.Unknown
-}
9	vendor/google.golang.org/grpc/grpclb.go (generated, vendored)
@@ -58,7 +58,7 @@ func (c *loadBalancerClient) BalanceLoad(ctx context.Context, opts ...CallOption
 		ServerStreams: true,
 		ClientStreams: true,
 	}
-	stream, err := NewClientStream(ctx, desc, c.cc, "/grpc.lb.v1.LoadBalancer/BalanceLoad", opts...)
+	stream, err := c.cc.NewStream(ctx, desc, "/grpc.lb.v1.LoadBalancer/BalanceLoad", opts...)
 	if err != nil {
 		return nil, err
 	}
@@ -127,7 +127,7 @@ func (b *lbBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) bal
 	}

 	lb := &lbBalancer{
-		cc:              cc,
+		cc:              newLBCacheClientConn(cc),
 		target:          target,
 		opt:             opt,
 		fallbackTimeout: b.fallbackTimeout,
@@ -145,7 +145,7 @@ func (b *lbBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) bal
 }

 type lbBalancer struct {
-	cc              balancer.ClientConn
+	cc              *lbCacheClientConn
 	target          string
 	opt             balancer.BuildOptions
 	fallbackTimeout time.Duration
@@ -220,7 +220,6 @@ func (lb *lbBalancer) regeneratePicker() {
 		subConns: readySCs,
 		stats:    lb.clientStats,
 	}
-	return
 }

 func (lb *lbBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
@@ -257,7 +256,6 @@ func (lb *lbBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivi
 	}

 	lb.cc.UpdateBalancerState(lb.state, lb.picker)
-	return
 }

 // fallbackToBackendsAfter blocks for fallbackTimeout and falls back to use
@@ -339,4 +337,5 @@ func (lb *lbBalancer) Close() {
 	if lb.ccRemoteLB != nil {
 		lb.ccRemoteLB.Close()
 	}
+	lb.cc.close()
 }
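The grpclb changes above route SubConn bookkeeping through newLBCacheClientConn (defined later in grpclb_util.go), which delays SubConn removal so that a backend re-added shortly afterwards can reuse its existing, possibly READY, SubConn. A simplified, self-contained sketch of that timed-removal idea, with illustrative names only (not the actual implementation):

package main

import (
	"fmt"
	"sync"
	"time"
)

// cache keeps entries "live" and, instead of deleting them immediately,
// schedules deletion after a delay; re-adding a key cancels the pending delete.
type cache struct {
	mu      sync.Mutex
	pending map[string]*time.Timer // keys scheduled for deletion
	live    map[string]bool
}

func newCache() *cache {
	return &cache{
		pending: make(map[string]*time.Timer),
		live:    make(map[string]bool),
	}
}

// Remove schedules the key for deletion after d instead of deleting it now.
func (c *cache) Remove(key string, d time.Duration) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if t, ok := c.pending[key]; ok {
		t.Stop()
	}
	c.pending[key] = time.AfterFunc(d, func() {
		c.mu.Lock()
		defer c.mu.Unlock()
		delete(c.live, key)
		delete(c.pending, key)
	})
}

// Add marks the key live and cancels any pending deletion for it.
func (c *cache) Add(key string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if t, ok := c.pending[key]; ok {
		t.Stop()
		delete(c.pending, key)
	}
	c.live[key] = true
}

func main() {
	c := newCache()
	c.Add("backend-1")
	c.Remove("backend-1", 50*time.Millisecond)
	c.Add("backend-1") // re-added before the timer fires; deletion is cancelled
	time.Sleep(100 * time.Millisecond)
	fmt.Println(c.live["backend-1"]) // true
}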
410	vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/messages.pb.go (generated, vendored)
@@ -1,24 +1,7 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
 // source: grpc_lb_v1/messages/messages.proto

-/*
-Package messages is a generated protocol buffer package.
-
-It is generated from these files:
-	grpc_lb_v1/messages/messages.proto
-
-It has these top-level messages:
-	Duration
-	Timestamp
-	LoadBalanceRequest
-	InitialLoadBalanceRequest
-	ClientStats
-	LoadBalanceResponse
-	InitialLoadBalanceResponse
-	ServerList
-	Server
-*/
-package messages
+package messages // import "google.golang.org/grpc/grpclb/grpc_lb_v1/messages"

 import proto "github.com/golang/protobuf/proto"
 import fmt "fmt"
@@ -45,13 +28,35 @@ type Duration struct {
 	// of one second or more, a non-zero value for the `nanos` field must be
 	// of the same sign as the `seconds` field. Must be from -999,999,999
 	// to +999,999,999 inclusive.
 	Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }

 func (m *Duration) Reset()         { *m = Duration{} }
 func (m *Duration) String() string { return proto.CompactTextString(m) }
 func (*Duration) ProtoMessage()    {}
-func (*Duration) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+func (*Duration) Descriptor() ([]byte, []int) {
+	return fileDescriptor_messages_b81c731f0e83edbd, []int{0}
+}
+func (m *Duration) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Duration.Unmarshal(m, b)
+}
+func (m *Duration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Duration.Marshal(b, m, deterministic)
+}
+func (dst *Duration) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Duration.Merge(dst, src)
+}
+func (m *Duration) XXX_Size() int {
+	return xxx_messageInfo_Duration.Size(m)
+}
+func (m *Duration) XXX_DiscardUnknown() {
+	xxx_messageInfo_Duration.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Duration proto.InternalMessageInfo
+
 func (m *Duration) GetSeconds() int64 {
 	if m != nil {
@@ -76,13 +81,35 @@ type Timestamp struct {
 	// second values with fractions must still have non-negative nanos values
 	// that count forward in time. Must be from 0 to 999,999,999
 	// inclusive.
 	Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }

 func (m *Timestamp) Reset()         { *m = Timestamp{} }
 func (m *Timestamp) String() string { return proto.CompactTextString(m) }
 func (*Timestamp) ProtoMessage()    {}
-func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
+func (*Timestamp) Descriptor() ([]byte, []int) {
+	return fileDescriptor_messages_b81c731f0e83edbd, []int{1}
+}
+func (m *Timestamp) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Timestamp.Unmarshal(m, b)
+}
+func (m *Timestamp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Timestamp.Marshal(b, m, deterministic)
+}
+func (dst *Timestamp) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Timestamp.Merge(dst, src)
+}
+func (m *Timestamp) XXX_Size() int {
+	return xxx_messageInfo_Timestamp.Size(m)
+}
+func (m *Timestamp) XXX_DiscardUnknown() {
+	xxx_messageInfo_Timestamp.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Timestamp proto.InternalMessageInfo
+
 func (m *Timestamp) GetSeconds() int64 {
 	if m != nil {
@@ -103,12 +130,34 @@ type LoadBalanceRequest struct {
 	//	*LoadBalanceRequest_InitialRequest
 	//	*LoadBalanceRequest_ClientStats
 	LoadBalanceRequestType isLoadBalanceRequest_LoadBalanceRequestType `protobuf_oneof:"load_balance_request_type"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }

 func (m *LoadBalanceRequest) Reset()         { *m = LoadBalanceRequest{} }
 func (m *LoadBalanceRequest) String() string { return proto.CompactTextString(m) }
 func (*LoadBalanceRequest) ProtoMessage()    {}
-func (*LoadBalanceRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
+func (*LoadBalanceRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_messages_b81c731f0e83edbd, []int{2}
+}
+func (m *LoadBalanceRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_LoadBalanceRequest.Unmarshal(m, b)
+}
+func (m *LoadBalanceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_LoadBalanceRequest.Marshal(b, m, deterministic)
+}
+func (dst *LoadBalanceRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_LoadBalanceRequest.Merge(dst, src)
+}
+func (m *LoadBalanceRequest) XXX_Size() int {
+	return xxx_messageInfo_LoadBalanceRequest.Size(m)
+}
+func (m *LoadBalanceRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_LoadBalanceRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LoadBalanceRequest proto.InternalMessageInfo
+
 type isLoadBalanceRequest_LoadBalanceRequestType interface {
 	isLoadBalanceRequest_LoadBalanceRequestType()
@@ -204,12 +253,12 @@ func _LoadBalanceRequest_OneofSizer(msg proto.Message) (n int) {
 	switch x := m.LoadBalanceRequestType.(type) {
 	case *LoadBalanceRequest_InitialRequest:
 		s := proto.Size(x.InitialRequest)
-		n += proto.SizeVarint(1<<3 | proto.WireBytes)
+		n += 1 // tag and wire
 		n += proto.SizeVarint(uint64(s))
 		n += s
 	case *LoadBalanceRequest_ClientStats:
 		s := proto.Size(x.ClientStats)
-		n += proto.SizeVarint(2<<3 | proto.WireBytes)
+		n += 1 // tag and wire
 		n += proto.SizeVarint(uint64(s))
 		n += s
 	case nil:
@@ -222,13 +271,35 @@ func _LoadBalanceRequest_OneofSizer(msg proto.Message) (n int) {
 type InitialLoadBalanceRequest struct {
 	// Name of load balanced service (IE, balancer.service.com)
 	// length should be less than 256 bytes.
 	Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }

 func (m *InitialLoadBalanceRequest) Reset()         { *m = InitialLoadBalanceRequest{} }
 func (m *InitialLoadBalanceRequest) String() string { return proto.CompactTextString(m) }
 func (*InitialLoadBalanceRequest) ProtoMessage()    {}
-func (*InitialLoadBalanceRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
+func (*InitialLoadBalanceRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_messages_b81c731f0e83edbd, []int{3}
+}
+func (m *InitialLoadBalanceRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_InitialLoadBalanceRequest.Unmarshal(m, b)
+}
+func (m *InitialLoadBalanceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_InitialLoadBalanceRequest.Marshal(b, m, deterministic)
+}
+func (dst *InitialLoadBalanceRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_InitialLoadBalanceRequest.Merge(dst, src)
+}
+func (m *InitialLoadBalanceRequest) XXX_Size() int {
+	return xxx_messageInfo_InitialLoadBalanceRequest.Size(m)
+}
+func (m *InitialLoadBalanceRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_InitialLoadBalanceRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_InitialLoadBalanceRequest proto.InternalMessageInfo
+
 func (m *InitialLoadBalanceRequest) GetName() string {
 	if m != nil {
@@ -256,13 +327,35 @@ type ClientStats struct {
 	NumCallsFinishedWithClientFailedToSend int64 `protobuf:"varint,6,opt,name=num_calls_finished_with_client_failed_to_send,json=numCallsFinishedWithClientFailedToSend" json:"num_calls_finished_with_client_failed_to_send,omitempty"`
 	// The total number of RPCs that finished and are known to have been received
 	// by a server.
 	NumCallsFinishedKnownReceived int64 `protobuf:"varint,7,opt,name=num_calls_finished_known_received,json=numCallsFinishedKnownReceived" json:"num_calls_finished_known_received,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }

 func (m *ClientStats) Reset()         { *m = ClientStats{} }
 func (m *ClientStats) String() string { return proto.CompactTextString(m) }
 func (*ClientStats) ProtoMessage()    {}
-func (*ClientStats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
+func (*ClientStats) Descriptor() ([]byte, []int) {
+	return fileDescriptor_messages_b81c731f0e83edbd, []int{4}
+}
+func (m *ClientStats) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ClientStats.Unmarshal(m, b)
+}
+func (m *ClientStats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ClientStats.Marshal(b, m, deterministic)
+}
+func (dst *ClientStats) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ClientStats.Merge(dst, src)
+}
+func (m *ClientStats) XXX_Size() int {
+	return xxx_messageInfo_ClientStats.Size(m)
+}
+func (m *ClientStats) XXX_DiscardUnknown() {
+	xxx_messageInfo_ClientStats.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ClientStats proto.InternalMessageInfo
+
 func (m *ClientStats) GetTimestamp() *Timestamp {
 	if m != nil {
@@ -318,12 +411,34 @@ type LoadBalanceResponse struct {
 	//	*LoadBalanceResponse_InitialResponse
 	//	*LoadBalanceResponse_ServerList
 	LoadBalanceResponseType isLoadBalanceResponse_LoadBalanceResponseType `protobuf_oneof:"load_balance_response_type"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }

 func (m *LoadBalanceResponse) Reset()         { *m = LoadBalanceResponse{} }
 func (m *LoadBalanceResponse) String() string { return proto.CompactTextString(m) }
 func (*LoadBalanceResponse) ProtoMessage()    {}
-func (*LoadBalanceResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
+func (*LoadBalanceResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_messages_b81c731f0e83edbd, []int{5}
+}
+func (m *LoadBalanceResponse) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_LoadBalanceResponse.Unmarshal(m, b)
+}
+func (m *LoadBalanceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_LoadBalanceResponse.Marshal(b, m, deterministic)
+}
+func (dst *LoadBalanceResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_LoadBalanceResponse.Merge(dst, src)
+}
+func (m *LoadBalanceResponse) XXX_Size() int {
+	return xxx_messageInfo_LoadBalanceResponse.Size(m)
+}
+func (m *LoadBalanceResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_LoadBalanceResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LoadBalanceResponse proto.InternalMessageInfo
+
 type isLoadBalanceResponse_LoadBalanceResponseType interface {
 	isLoadBalanceResponse_LoadBalanceResponseType()
@@ -419,12 +534,12 @@ func _LoadBalanceResponse_OneofSizer(msg proto.Message) (n int) {
 	switch x := m.LoadBalanceResponseType.(type) {
 	case *LoadBalanceResponse_InitialResponse:
 		s := proto.Size(x.InitialResponse)
-		n += proto.SizeVarint(1<<3 | proto.WireBytes)
+		n += 1 // tag and wire
 		n += proto.SizeVarint(uint64(s))
 		n += s
 	case *LoadBalanceResponse_ServerList:
 		s := proto.Size(x.ServerList)
-		n += proto.SizeVarint(2<<3 | proto.WireBytes)
+		n += 1 // tag and wire
 		n += proto.SizeVarint(uint64(s))
 		n += s
 	case nil:
@@ -445,12 +560,34 @@ type InitialLoadBalanceResponse struct {
 	// to the load balancer. Stats should only be reported when the duration is
 	// positive.
 	ClientStatsReportInterval *Duration `protobuf:"bytes,2,opt,name=client_stats_report_interval,json=clientStatsReportInterval" json:"client_stats_report_interval,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }

 func (m *InitialLoadBalanceResponse) Reset()         { *m = InitialLoadBalanceResponse{} }
 func (m *InitialLoadBalanceResponse) String() string { return proto.CompactTextString(m) }
 func (*InitialLoadBalanceResponse) ProtoMessage()    {}
-func (*InitialLoadBalanceResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
+func (*InitialLoadBalanceResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_messages_b81c731f0e83edbd, []int{6}
+}
+func (m *InitialLoadBalanceResponse) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_InitialLoadBalanceResponse.Unmarshal(m, b)
+}
+func (m *InitialLoadBalanceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_InitialLoadBalanceResponse.Marshal(b, m, deterministic)
+}
+func (dst *InitialLoadBalanceResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_InitialLoadBalanceResponse.Merge(dst, src)
+}
+func (m *InitialLoadBalanceResponse) XXX_Size() int {
+	return xxx_messageInfo_InitialLoadBalanceResponse.Size(m)
+}
+func (m *InitialLoadBalanceResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_InitialLoadBalanceResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_InitialLoadBalanceResponse proto.InternalMessageInfo
+
 func (m *InitialLoadBalanceResponse) GetLoadBalancerDelegate() string {
 	if m != nil {
@@ -471,13 +608,35 @@ type ServerList struct {
 	// be updated when server resolutions change or as needed to balance load
 	// across more servers. The client should consume the server list in order
 	// unless instructed otherwise via the client_config.
 	Servers []*Server `protobuf:"bytes,1,rep,name=servers" json:"servers,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }

 func (m *ServerList) Reset()         { *m = ServerList{} }
 func (m *ServerList) String() string { return proto.CompactTextString(m) }
 func (*ServerList) ProtoMessage()    {}
-func (*ServerList) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
+func (*ServerList) Descriptor() ([]byte, []int) {
+	return fileDescriptor_messages_b81c731f0e83edbd, []int{7}
+}
+func (m *ServerList) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ServerList.Unmarshal(m, b)
+}
+func (m *ServerList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ServerList.Marshal(b, m, deterministic)
+}
+func (dst *ServerList) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ServerList.Merge(dst, src)
+}
+func (m *ServerList) XXX_Size() int {
+	return xxx_messageInfo_ServerList.Size(m)
+}
+func (m *ServerList) XXX_DiscardUnknown() {
+	xxx_messageInfo_ServerList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ServerList proto.InternalMessageInfo
+
 func (m *ServerList) GetServers() []*Server {
 	if m != nil {
@@ -508,13 +667,35 @@ type Server struct {
 	DropForRateLimiting bool `protobuf:"varint,4,opt,name=drop_for_rate_limiting,json=dropForRateLimiting" json:"drop_for_rate_limiting,omitempty"`
 	// Indicates whether this particular request should be dropped by the client
 	// for load balancing.
 	DropForLoadBalancing bool `protobuf:"varint,5,opt,name=drop_for_load_balancing,json=dropForLoadBalancing" json:"drop_for_load_balancing,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }

 func (m *Server) Reset()         { *m = Server{} }
 func (m *Server) String() string { return proto.CompactTextString(m) }
 func (*Server) ProtoMessage()    {}
-func (*Server) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} }
+func (*Server) Descriptor() ([]byte, []int) {
+	return fileDescriptor_messages_b81c731f0e83edbd, []int{8}
+}
+func (m *Server) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Server.Unmarshal(m, b)
+}
+func (m *Server) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Server.Marshal(b, m, deterministic)
+}
+func (dst *Server) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Server.Merge(dst, src)
+}
+func (m *Server) XXX_Size() int {
+	return xxx_messageInfo_Server.Size(m)
+}
+func (m *Server) XXX_DiscardUnknown() {
+	xxx_messageInfo_Server.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Server proto.InternalMessageInfo
+
 func (m *Server) GetIpAddress() []byte {
 	if m != nil {
@ -563,53 +744,56 @@ func init() {
|
|||||||
proto.RegisterType((*Server)(nil), "grpc.lb.v1.Server")
|
proto.RegisterType((*Server)(nil), "grpc.lb.v1.Server")
|
||||||
}
|
}
|
||||||
|
|
||||||
func init() { proto.RegisterFile("grpc_lb_v1/messages/messages.proto", fileDescriptor0) }
|
func init() {
|
||||||
|
proto.RegisterFile("grpc_lb_v1/messages/messages.proto", fileDescriptor_messages_b81c731f0e83edbd)
|
||||||
var fileDescriptor0 = []byte{
|
}
|
||||||
// 709 bytes of a gzipped FileDescriptorProto
|
|
||||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x55, 0xdd, 0x4e, 0x1b, 0x3b,
|
var fileDescriptor_messages_b81c731f0e83edbd = []byte{
|
||||||
0x10, 0x26, 0x27, 0x01, 0x92, 0x09, 0x3a, 0xe4, 0x98, 0x1c, 0x08, 0x14, 0x24, 0xba, 0x52, 0x69,
|
// 731 bytes of a gzipped FileDescriptorProto
|
||||||
0x54, 0xd1, 0x20, 0xa0, 0xbd, 0xe8, 0xcf, 0x45, 0x1b, 0x10, 0x0a, 0x2d, 0x17, 0x95, 0x43, 0x55,
|
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x55, 0xdd, 0x4e, 0x1b, 0x39,
|
||||||
0xa9, 0x52, 0x65, 0x39, 0xd9, 0x21, 0x58, 0x6c, 0xec, 0xad, 0xed, 0x04, 0xf5, 0x11, 0xfa, 0x28,
|
0x14, 0x26, 0x9b, 0x00, 0xc9, 0x09, 0x5a, 0xb2, 0x26, 0x0b, 0x81, 0x05, 0x89, 0x1d, 0x69, 0xd9,
|
||||||
0x7d, 0x8c, 0xaa, 0xcf, 0xd0, 0xf7, 0xa9, 0xd6, 0xbb, 0x9b, 0x5d, 0x20, 0x80, 0x7a, 0x67, 0x8f,
|
0x68, 0xc5, 0x4e, 0x04, 0xd9, 0xbd, 0xe8, 0xcf, 0x45, 0x1b, 0x10, 0x0a, 0x2d, 0x17, 0x95, 0x43,
|
||||||
0xbf, 0xf9, 0xbe, 0xf1, 0xac, 0xbf, 0x59, 0xf0, 0x06, 0x3a, 0xec, 0xb3, 0xa0, 0xc7, 0xc6, 0xbb,
|
0x55, 0xa9, 0x52, 0x65, 0x39, 0x19, 0x33, 0x58, 0x38, 0xf6, 0xd4, 0x76, 0x82, 0xfa, 0x08, 0x7d,
|
||||||
0x3b, 0x43, 0x34, 0x86, 0x0f, 0xd0, 0x4c, 0x16, 0xad, 0x50, 0x2b, 0xab, 0x08, 0x44, 0x98, 0x56,
|
0x94, 0x3e, 0x46, 0xd5, 0x67, 0xe8, 0xfb, 0x54, 0xe3, 0x99, 0xc9, 0x0c, 0x10, 0x40, 0xbd, 0x89,
|
||||||
0xd0, 0x6b, 0x8d, 0x77, 0xbd, 0x97, 0x50, 0x3e, 0x1c, 0x69, 0x6e, 0x85, 0x92, 0xa4, 0x01, 0xf3,
|
0xec, 0xe3, 0xef, 0x7c, 0xdf, 0xf1, 0x89, 0xbf, 0x33, 0xe0, 0x85, 0x3a, 0x1a, 0x11, 0x31, 0x24,
|
||||||
0x06, 0xfb, 0x4a, 0xfa, 0xa6, 0x51, 0xd8, 0x2c, 0x34, 0x8b, 0x34, 0xdd, 0x92, 0x3a, 0xcc, 0x4a,
|
0xd3, 0x83, 0xce, 0x98, 0x19, 0x43, 0x43, 0x66, 0x66, 0x0b, 0x3f, 0xd2, 0xca, 0x2a, 0x04, 0x31,
|
||||||
0x2e, 0x95, 0x69, 0xfc, 0xb3, 0x59, 0x68, 0xce, 0xd2, 0x78, 0xe3, 0xbd, 0x82, 0xca, 0xa9, 0x18,
|
0xc6, 0x17, 0x43, 0x7f, 0x7a, 0xe0, 0x3d, 0x85, 0xea, 0xf1, 0x44, 0x53, 0xcb, 0x95, 0x44, 0x2d,
|
||||||
0xa2, 0xb1, 0x7c, 0x18, 0xfe, 0x75, 0xf2, 0xcf, 0x02, 0x90, 0x13, 0xc5, 0xfd, 0x36, 0x0f, 0xb8,
|
0x58, 0x36, 0x6c, 0xa4, 0x64, 0x60, 0x5a, 0xa5, 0xdd, 0x52, 0xbb, 0x8c, 0xb3, 0x2d, 0x6a, 0xc2,
|
||||||
0xec, 0x23, 0xc5, 0xaf, 0x23, 0x34, 0x96, 0x7c, 0x80, 0x45, 0x21, 0x85, 0x15, 0x3c, 0x60, 0x3a,
|
0xa2, 0xa4, 0x52, 0x99, 0xd6, 0x2f, 0xbb, 0xa5, 0xf6, 0x22, 0x4e, 0x36, 0xde, 0x33, 0xa8, 0x9d,
|
||||||
0x0e, 0x39, 0xba, 0xea, 0xde, 0xa3, 0x56, 0x56, 0x75, 0xeb, 0x38, 0x86, 0xdc, 0xcc, 0xef, 0xcc,
|
0xf3, 0x31, 0x33, 0x96, 0x8e, 0xa3, 0x9f, 0x4e, 0xfe, 0x5a, 0x02, 0x74, 0xa6, 0x68, 0xd0, 0xa3,
|
||||||
0xd0, 0x7f, 0x93, 0xfc, 0x94, 0xf1, 0x35, 0x2c, 0xf4, 0x03, 0x81, 0xd2, 0x32, 0x63, 0xb9, 0x8d,
|
0x82, 0xca, 0x11, 0xc3, 0xec, 0xe3, 0x84, 0x19, 0x8b, 0xde, 0xc0, 0x2a, 0x97, 0xdc, 0x72, 0x2a,
|
||||||
0xab, 0xa8, 0xee, 0xad, 0xe4, 0xe9, 0x0e, 0xdc, 0x79, 0x37, 0x3a, 0xee, 0xcc, 0xd0, 0x6a, 0x3f,
|
0x88, 0x4e, 0x42, 0x8e, 0xae, 0x7e, 0xf8, 0x97, 0x9f, 0x57, 0xed, 0x9f, 0x26, 0x90, 0xbb, 0xf9,
|
||||||
0xdb, 0xb6, 0x1f, 0xc0, 0x6a, 0xa0, 0xb8, 0xcf, 0x7a, 0xb1, 0x4c, 0x5a, 0x14, 0xb3, 0xdf, 0x42,
|
0xfd, 0x05, 0xfc, 0x6b, 0x9a, 0x9f, 0x31, 0x3e, 0x87, 0x95, 0x91, 0xe0, 0x4c, 0x5a, 0x62, 0x2c,
|
||||||
0xf4, 0x76, 0x60, 0xf5, 0xd6, 0x4a, 0x08, 0x81, 0x92, 0xe4, 0x43, 0x74, 0xe5, 0x57, 0xa8, 0x5b,
|
0xb5, 0x49, 0x15, 0xf5, 0xc3, 0x8d, 0x22, 0xdd, 0x91, 0x3b, 0x1f, 0xc4, 0xc7, 0xfd, 0x05, 0x5c,
|
||||||
0x7b, 0xdf, 0x4b, 0x50, 0xcd, 0x89, 0x91, 0x7d, 0xa8, 0xd8, 0xb4, 0x83, 0xc9, 0x3d, 0xff, 0xcf,
|
0x1f, 0xe5, 0xdb, 0xde, 0x1f, 0xb0, 0x29, 0x14, 0x0d, 0xc8, 0x30, 0x91, 0xc9, 0x8a, 0x22, 0xf6,
|
||||||
0x17, 0x36, 0x69, 0x2f, 0xcd, 0x70, 0xe4, 0x09, 0xfc, 0x27, 0x47, 0x43, 0xd6, 0xe7, 0x41, 0x60,
|
0x53, 0xc4, 0xbc, 0x0e, 0x6c, 0xde, 0x5b, 0x09, 0x42, 0x50, 0x91, 0x74, 0xcc, 0x5c, 0xf9, 0x35,
|
||||||
0xa2, 0x3b, 0x69, 0x8b, 0xbe, 0xbb, 0x55, 0x91, 0x2e, 0xca, 0xd1, 0xf0, 0x20, 0x8a, 0x77, 0xe3,
|
0xec, 0xd6, 0xde, 0xe7, 0x0a, 0xd4, 0x0b, 0x62, 0xa8, 0x0b, 0x35, 0x9b, 0x75, 0x30, 0xbd, 0xe7,
|
||||||
0x30, 0xd9, 0x06, 0x92, 0x61, 0xcf, 0x84, 0x14, 0xe6, 0x1c, 0xfd, 0x46, 0xd1, 0x81, 0x6b, 0x29,
|
0xef, 0xc5, 0xc2, 0x66, 0xed, 0xc5, 0x39, 0x0e, 0xfd, 0x03, 0xbf, 0xc9, 0xc9, 0x98, 0x8c, 0xa8,
|
||||||
0xf8, 0x28, 0x89, 0x13, 0x06, 0xad, 0x9b, 0x68, 0x76, 0x29, 0xec, 0x39, 0xf3, 0xb5, 0x0a, 0xd9,
|
0x10, 0x26, 0xbe, 0x93, 0xb6, 0x2c, 0x70, 0xb7, 0x2a, 0xe3, 0x55, 0x39, 0x19, 0x1f, 0xc5, 0xf1,
|
||||||
0x99, 0xd2, 0x4c, 0x73, 0x8b, 0x2c, 0x10, 0x43, 0x61, 0x85, 0x1c, 0x34, 0x4a, 0x8e, 0xe9, 0xf1,
|
0x41, 0x12, 0x46, 0xfb, 0x80, 0x72, 0xec, 0x05, 0x97, 0xdc, 0x5c, 0xb2, 0xa0, 0x55, 0x76, 0xe0,
|
||||||
0x75, 0xa6, 0x4f, 0xc2, 0x9e, 0x1f, 0x6a, 0x15, 0x1e, 0x29, 0x4d, 0xb9, 0xc5, 0x93, 0x04, 0x4e,
|
0x46, 0x06, 0x3e, 0x49, 0xe3, 0x88, 0x80, 0x7f, 0x17, 0x4d, 0xae, 0xb9, 0xbd, 0x24, 0x81, 0x56,
|
||||||
0x38, 0xec, 0xdc, 0x2b, 0x90, 0x6b, 0x77, 0xa4, 0x30, 0xeb, 0x14, 0x9a, 0x77, 0x28, 0x64, 0xbd,
|
0x11, 0xb9, 0x50, 0x9a, 0x68, 0x6a, 0x19, 0x11, 0x7c, 0xcc, 0x2d, 0x97, 0x61, 0xab, 0xe2, 0x98,
|
||||||
0x8f, 0x24, 0xbe, 0xc0, 0xd3, 0xdb, 0x24, 0x92, 0x67, 0x70, 0xc6, 0x45, 0x80, 0x3e, 0xb3, 0x8a,
|
0xfe, 0xbe, 0xcd, 0xf4, 0x8e, 0xdb, 0xcb, 0x63, 0xad, 0xa2, 0x13, 0xa5, 0x31, 0xb5, 0xec, 0x2c,
|
||||||
0x19, 0x94, 0x7e, 0x63, 0xce, 0x09, 0x6c, 0x4d, 0x13, 0x88, 0x3f, 0xd5, 0x91, 0xc3, 0x9f, 0xaa,
|
0x85, 0x23, 0x0a, 0x9d, 0x47, 0x05, 0x0a, 0xed, 0x8e, 0x15, 0x16, 0x9d, 0x42, 0xfb, 0x01, 0x85,
|
||||||
0x2e, 0x4a, 0x9f, 0x74, 0xe0, 0xe1, 0x14, 0xfa, 0x0b, 0xa9, 0x2e, 0x25, 0xd3, 0xd8, 0x47, 0x31,
|
0xbc, 0xf7, 0xb1, 0xc4, 0x07, 0xf8, 0xf7, 0x3e, 0x89, 0xf4, 0x19, 0x5c, 0x50, 0x2e, 0x58, 0x40,
|
||||||
0x46, 0xbf, 0x31, 0xef, 0x28, 0x37, 0xae, 0x53, 0xbe, 0x8f, 0x50, 0x34, 0x01, 0x79, 0xbf, 0x0a,
|
0xac, 0x22, 0x86, 0xc9, 0xa0, 0xb5, 0xe4, 0x04, 0xf6, 0xe6, 0x09, 0x24, 0x7f, 0xd5, 0x89, 0xc3,
|
||||||
0xb0, 0x74, 0xe5, 0xd9, 0x98, 0x50, 0x49, 0x83, 0xa4, 0x0b, 0xb5, 0xcc, 0x01, 0x71, 0x2c, 0x79,
|
0x9f, 0xab, 0x01, 0x93, 0x01, 0xea, 0xc3, 0x9f, 0x73, 0xe8, 0xaf, 0xa4, 0xba, 0x96, 0x44, 0xb3,
|
||||||
0x1a, 0x5b, 0xf7, 0x59, 0x20, 0x46, 0x77, 0x66, 0xe8, 0xe2, 0xc4, 0x03, 0x09, 0xe9, 0x0b, 0xa8,
|
0x11, 0xe3, 0x53, 0x16, 0xb4, 0x96, 0x1d, 0xe5, 0xce, 0x6d, 0xca, 0xd7, 0x31, 0x0a, 0xa7, 0x20,
|
||||||
0x1a, 0xd4, 0x63, 0xd4, 0x2c, 0x10, 0xc6, 0x26, 0x1e, 0x58, 0xce, 0xf3, 0x75, 0xdd, 0xf1, 0x89,
|
0xef, 0x5b, 0x09, 0xd6, 0x6e, 0x3c, 0x1b, 0x13, 0x29, 0x69, 0x18, 0x1a, 0x40, 0x23, 0x77, 0x40,
|
||||||
0x70, 0x1e, 0x02, 0x33, 0xd9, 0xb5, 0xd7, 0x61, 0xed, 0x9a, 0x03, 0x62, 0xce, 0xd8, 0x02, 0x3f,
|
0x12, 0x4b, 0x9f, 0xc6, 0xde, 0x63, 0x16, 0x48, 0xd0, 0xfd, 0x05, 0xbc, 0x3a, 0xf3, 0x40, 0x4a,
|
||||||
0x0a, 0xb0, 0x76, 0x7b, 0x29, 0xe4, 0x19, 0x2c, 0xe7, 0x93, 0x35, 0xf3, 0x31, 0xc0, 0x01, 0xb7,
|
0xfa, 0x04, 0xea, 0x86, 0xe9, 0x29, 0xd3, 0x44, 0x70, 0x63, 0x53, 0x0f, 0xac, 0x17, 0xf9, 0x06,
|
||||||
0xa9, 0x2d, 0xea, 0x41, 0x96, 0xa4, 0x0f, 0x93, 0x33, 0xf2, 0x11, 0xd6, 0xf3, 0x96, 0x65, 0x1a,
|
0xee, 0xf8, 0x8c, 0x3b, 0x0f, 0x81, 0x99, 0xed, 0x7a, 0xdb, 0xb0, 0x75, 0xcb, 0x01, 0x09, 0x67,
|
||||||
0x43, 0xa5, 0x2d, 0x13, 0xd2, 0xa2, 0x1e, 0xf3, 0x20, 0x29, 0xbf, 0x9e, 0x2f, 0x3f, 0x1d, 0x62,
|
0x62, 0x81, 0x2f, 0x25, 0xd8, 0xba, 0xbf, 0x14, 0xf4, 0x1f, 0xac, 0x17, 0x93, 0x35, 0x09, 0x98,
|
||||||
0x74, 0x35, 0xe7, 0x5e, 0xea, 0xf2, 0x8e, 0x93, 0x34, 0xef, 0x0d, 0x40, 0x76, 0x4b, 0xb2, 0x1d,
|
0x60, 0x21, 0xb5, 0x99, 0x2d, 0x9a, 0x22, 0x4f, 0xd2, 0xc7, 0xe9, 0x19, 0x7a, 0x0b, 0xdb, 0x45,
|
||||||
0x0d, 0xac, 0x68, 0x17, 0x0d, 0xac, 0x62, 0xb3, 0xba, 0x47, 0x6e, 0xb6, 0x83, 0xa6, 0x90, 0x77,
|
0xcb, 0x12, 0xcd, 0x22, 0xa5, 0x2d, 0xe1, 0xd2, 0x32, 0x3d, 0xa5, 0x22, 0x2d, 0xbf, 0x59, 0x2c,
|
||||||
0xa5, 0x72, 0xb1, 0x56, 0xf2, 0x7e, 0x17, 0x60, 0x2e, 0x3e, 0x21, 0x1b, 0x00, 0x22, 0x64, 0xdc,
|
0x3f, 0x1b, 0x62, 0x78, 0xb3, 0xe0, 0x5e, 0xec, 0xf2, 0x4e, 0xd3, 0x34, 0xef, 0x05, 0x40, 0x7e,
|
||||||
0xf7, 0x35, 0x9a, 0x78, 0xe4, 0x2d, 0xd0, 0x8a, 0x08, 0xdf, 0xc6, 0x81, 0xc8, 0xfd, 0x91, 0x76,
|
0x4b, 0xb4, 0x1f, 0x0f, 0xac, 0x78, 0x17, 0x0f, 0xac, 0x72, 0xbb, 0x7e, 0x88, 0xee, 0xb6, 0x03,
|
||||||
0x32, 0xf3, 0xdc, 0x3a, 0x32, 0xe3, 0x95, 0x4e, 0x5a, 0x75, 0x81, 0xd2, 0x99, 0xb1, 0x42, 0x6b,
|
0x67, 0x90, 0x57, 0x95, 0x6a, 0xb9, 0x51, 0xf1, 0xbe, 0x97, 0x60, 0x29, 0x39, 0x41, 0x3b, 0x00,
|
||||||
0xb9, 0x46, 0x9c, 0x46, 0x71, 0xb2, 0x0f, 0xcb, 0x77, 0x98, 0xae, 0x4c, 0x97, 0xfc, 0x29, 0x06,
|
0x3c, 0x22, 0x34, 0x08, 0x34, 0x33, 0xc9, 0xc8, 0x5b, 0xc1, 0x35, 0x1e, 0xbd, 0x4c, 0x02, 0xb1,
|
||||||
0x7b, 0x0e, 0x2b, 0x77, 0x19, 0xa9, 0x4c, 0xeb, 0xfe, 0x14, 0xd3, 0xb4, 0xe1, 0x73, 0x39, 0xfd,
|
0xfb, 0x63, 0xed, 0x74, 0xe6, 0xb9, 0x75, 0x6c, 0xc6, 0x1b, 0x9d, 0xb4, 0xea, 0x8a, 0x49, 0x67,
|
||||||
0x47, 0xf4, 0xe6, 0xdc, 0x4f, 0x62, 0xff, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xa3, 0x36, 0x86,
|
0xc6, 0x1a, 0x6e, 0x14, 0x1a, 0x71, 0x1e, 0xc7, 0x51, 0x17, 0xd6, 0x1f, 0x30, 0x5d, 0x15, 0xaf,
|
||||||
0xa6, 0x4a, 0x06, 0x00, 0x00,
|
0x05, 0x73, 0x0c, 0xf6, 0x3f, 0x6c, 0x3c, 0x64, 0xa4, 0x2a, 0x6e, 0x06, 0x73, 0x4c, 0xd3, 0xeb,
|
||||||
|
0xbe, 0x3f, 0x08, 0x95, 0x0a, 0x05, 0xf3, 0x43, 0x25, 0xa8, 0x0c, 0x7d, 0xa5, 0xc3, 0x4e, 0xdc,
|
||||||
|
0x0d, 0xf7, 0x23, 0x86, 0x9d, 0x39, 0x5f, 0x95, 0xe1, 0x92, 0xfb, 0x9a, 0x74, 0x7f, 0x04, 0x00,
|
||||||
|
0x00, 0xff, 0xff, 0x8e, 0xd0, 0x70, 0xb7, 0x73, 0x06, 0x00, 0x00,
|
||||||
}
|
}
|
||||||
|
36
vendor/google.golang.org/grpc/grpclb_remote_balancer.go
generated
vendored
36
vendor/google.golang.org/grpc/grpclb_remote_balancer.go
generated
vendored
@ -26,6 +26,8 @@ import (
|
|||||||
|
|
||||||
"golang.org/x/net/context"
|
"golang.org/x/net/context"
|
||||||
"google.golang.org/grpc/balancer"
|
"google.golang.org/grpc/balancer"
|
||||||
|
"google.golang.org/grpc/channelz"
|
||||||
|
|
||||||
"google.golang.org/grpc/connectivity"
|
"google.golang.org/grpc/connectivity"
|
||||||
lbpb "google.golang.org/grpc/grpclb/grpc_lb_v1/messages"
|
lbpb "google.golang.org/grpc/grpclb/grpc_lb_v1/messages"
|
||||||
"google.golang.org/grpc/grpclog"
|
"google.golang.org/grpc/grpclog"
|
||||||
@ -74,15 +76,16 @@ func (lb *lbBalancer) processServerList(l *lbpb.ServerList) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Call refreshSubConns to create/remove SubConns.
|
// Call refreshSubConns to create/remove SubConns.
|
||||||
backendsUpdated := lb.refreshSubConns(backendAddrs)
|
lb.refreshSubConns(backendAddrs)
|
||||||
// If no backend was updated, no SubConn will be newed/removed. But since
|
// Regenerate and update picker no matter if there's update on backends (if
|
||||||
// the full serverList was different, there might be updates in drops or
|
// any SubConn will be newed/removed). Because since the full serverList was
|
||||||
// pick weights(different number of duplicates). We need to update picker
|
// different, there might be updates in drops or pick weights(different
|
||||||
// with the fulllist.
|
// number of duplicates). We need to update picker with the fulllist.
|
||||||
if !backendsUpdated {
|
//
|
||||||
lb.regeneratePicker()
|
// Now with cache, even if SubConn was newed/removed, there might be no
|
||||||
lb.cc.UpdateBalancerState(lb.state, lb.picker)
|
// state changes.
|
||||||
}
|
lb.regeneratePicker()
|
||||||
|
lb.cc.UpdateBalancerState(lb.state, lb.picker)
|
||||||
}
|
}
|
||||||
|
|
||||||
// refreshSubConns creates/removes SubConns with backendAddrs. It returns a bool
|
// refreshSubConns creates/removes SubConns with backendAddrs. It returns a bool
|
||||||
@ -112,7 +115,11 @@ func (lb *lbBalancer) refreshSubConns(backendAddrs []resolver.Address) bool {
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
lb.subConns[addrWithoutMD] = sc // Use the addr without MD as key for the map.
|
lb.subConns[addrWithoutMD] = sc // Use the addr without MD as key for the map.
|
||||||
lb.scStates[sc] = connectivity.Idle
|
if _, ok := lb.scStates[sc]; !ok {
|
||||||
|
// Only set state of new sc to IDLE. The state could already be
|
||||||
|
// READY for cached SubConns.
|
||||||
|
lb.scStates[sc] = connectivity.Idle
|
||||||
|
}
|
||||||
sc.Connect()
|
sc.Connect()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -168,6 +175,7 @@ func (lb *lbBalancer) sendLoadReport(s *balanceLoadClientStream, interval time.D
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (lb *lbBalancer) callRemoteBalancer() error {
|
func (lb *lbBalancer) callRemoteBalancer() error {
|
||||||
lbClient := &loadBalancerClient{cc: lb.ccRemoteLB}
|
lbClient := &loadBalancerClient{cc: lb.ccRemoteLB}
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
@ -243,9 +251,13 @@ func (lb *lbBalancer) dialRemoteLB(remoteLBName string) {
|
|||||||
// Explicitly set pickfirst as the balancer.
|
// Explicitly set pickfirst as the balancer.
|
||||||
dopts = append(dopts, WithBalancerName(PickFirstBalancerName))
|
dopts = append(dopts, WithBalancerName(PickFirstBalancerName))
|
||||||
dopts = append(dopts, withResolverBuilder(lb.manualResolver))
|
dopts = append(dopts, withResolverBuilder(lb.manualResolver))
|
||||||
// Dial using manualResolver.Scheme, which is a random scheme generated
|
if channelz.IsOn() {
|
||||||
|
dopts = append(dopts, WithChannelzParentID(lb.opt.ChannelzParentID))
|
||||||
|
}
|
||||||
|
|
||||||
|
// DialContext using manualResolver.Scheme, which is a random scheme generated
|
||||||
// when init grpclb. The target name is not important.
|
// when init grpclb. The target name is not important.
|
||||||
cc, err := Dial("grpclb:///grpclb.server", dopts...)
|
cc, err := DialContext(context.Background(), "grpclb:///grpclb.server", dopts...)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
grpclog.Fatalf("failed to dial: %v", err)
|
grpclog.Fatalf("failed to dial: %v", err)
|
||||||
}
|
}
|
||||||
|

124  vendor/google.golang.org/grpc/grpclb_util.go  (generated, vendored)

@@ -19,7 +19,12 @@
 package grpc
 
 import (
+	"fmt"
+	"sync"
+	"time"
+
 	"google.golang.org/grpc/balancer"
+	"google.golang.org/grpc/connectivity"
 	"google.golang.org/grpc/resolver"
 )
 
@@ -88,3 +93,122 @@ func (r *lbManualResolver) NewAddress(addrs []resolver.Address) {
 func (r *lbManualResolver) NewServiceConfig(sc string) {
 	r.ccr.NewServiceConfig(sc)
 }
+
+const subConnCacheTime = time.Second * 10
+
+// lbCacheClientConn is a wrapper balancer.ClientConn with a SubConn cache.
+// SubConns will be kept in cache for subConnCacheTime before being removed.
+//
+// Its new and remove methods are updated to do cache first.
+type lbCacheClientConn struct {
+	cc      balancer.ClientConn
+	timeout time.Duration
+
+	mu sync.Mutex
+	// subConnCache only keeps subConns that are being deleted.
+	subConnCache  map[resolver.Address]*subConnCacheEntry
+	subConnToAddr map[balancer.SubConn]resolver.Address
+}
+
+type subConnCacheEntry struct {
+	sc balancer.SubConn
+
+	cancel        func()
+	abortDeleting bool
+}
+
+func newLBCacheClientConn(cc balancer.ClientConn) *lbCacheClientConn {
+	return &lbCacheClientConn{
+		cc:            cc,
+		timeout:       subConnCacheTime,
+		subConnCache:  make(map[resolver.Address]*subConnCacheEntry),
+		subConnToAddr: make(map[balancer.SubConn]resolver.Address),
+	}
+}
+
+func (ccc *lbCacheClientConn) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
+	if len(addrs) != 1 {
+		return nil, fmt.Errorf("grpclb calling NewSubConn with addrs of length %v", len(addrs))
+	}
+	addrWithoutMD := addrs[0]
+	addrWithoutMD.Metadata = nil
+
+	ccc.mu.Lock()
+	defer ccc.mu.Unlock()
+	if entry, ok := ccc.subConnCache[addrWithoutMD]; ok {
+		// If entry is in subConnCache, the SubConn was being deleted.
+		// cancel function will never be nil.
+		entry.cancel()
+		delete(ccc.subConnCache, addrWithoutMD)
+		return entry.sc, nil
+	}
+
+	scNew, err := ccc.cc.NewSubConn(addrs, opts)
+	if err != nil {
+		return nil, err
+	}
+
+	ccc.subConnToAddr[scNew] = addrWithoutMD
+	return scNew, nil
+}
+
+func (ccc *lbCacheClientConn) RemoveSubConn(sc balancer.SubConn) {
+	ccc.mu.Lock()
+	defer ccc.mu.Unlock()
+	addr, ok := ccc.subConnToAddr[sc]
+	if !ok {
+		return
+	}
+
+	if entry, ok := ccc.subConnCache[addr]; ok {
+		if entry.sc != sc {
+			// This could happen if NewSubConn was called multiple times for the
+			// same address, and those SubConns are all removed. We remove sc
+			// immediately here.
+			delete(ccc.subConnToAddr, sc)
+			ccc.cc.RemoveSubConn(sc)
+		}
+		return
+	}
+
+	entry := &subConnCacheEntry{
+		sc: sc,
+	}
+	ccc.subConnCache[addr] = entry
+
+	timer := time.AfterFunc(ccc.timeout, func() {
+		ccc.mu.Lock()
+		if entry.abortDeleting {
+			return
+		}
+		ccc.cc.RemoveSubConn(sc)
+		delete(ccc.subConnToAddr, sc)
+		delete(ccc.subConnCache, addr)
+		ccc.mu.Unlock()
+	})
+	entry.cancel = func() {
+		if !timer.Stop() {
+			// If stop was not successful, the timer has fired (this can only
+			// happen in a race). But the deleting function is blocked on ccc.mu
+			// because the mutex was held by the caller of this function.
+			//
+			// Set abortDeleting to true to abort the deleting function. When
+			// the lock is released, the deleting function will acquire the
+			// lock, check the value of abortDeleting and return.
+			entry.abortDeleting = true
+		}
+	}
+}
+
+func (ccc *lbCacheClientConn) UpdateBalancerState(s connectivity.State, p balancer.Picker) {
+	ccc.cc.UpdateBalancerState(s, p)
+}
+
+func (ccc *lbCacheClientConn) close() {
+	ccc.mu.Lock()
+	// Only cancel all existing timers. There's no need to remove SubConns.
+	for _, entry := range ccc.subConnCache {
+		entry.cancel()
+	}
+	ccc.mu.Unlock()
+}
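
For readers skimming the new lbCacheClientConn above: the timer.Stop()/abortDeleting dance is how you cancel a time.AfterFunc that may already have fired while its callback is blocked on the same mutex. A minimal standalone sketch of that pattern, not part of the vendored code (all names here are illustrative):

package main

import (
	"fmt"
	"sync"
	"time"
)

func main() {
	var (
		mu            sync.Mutex
		abortDeleting bool
	)
	timer := time.AfterFunc(50*time.Millisecond, func() {
		mu.Lock()
		defer mu.Unlock()
		if abortDeleting {
			return // cancelled after the timer had already fired
		}
		fmt.Println("delayed removal runs")
	})

	mu.Lock()
	if !timer.Stop() {
		// The timer already fired; its callback is blocked on mu, so
		// flipping the flag while holding the lock is enough to abort it.
		abortDeleting = true
	}
	mu.Unlock()
	time.Sleep(100 * time.Millisecond)
}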

3  vendor/google.golang.org/grpc/grpclog/grpclog.go  (generated, vendored)

@@ -105,18 +105,21 @@ func Fatalln(args ...interface{}) {
 }
 
 // Print prints to the logger. Arguments are handled in the manner of fmt.Print.
+//
 // Deprecated: use Info.
 func Print(args ...interface{}) {
 	logger.Info(args...)
 }
 
 // Printf prints to the logger. Arguments are handled in the manner of fmt.Printf.
+//
 // Deprecated: use Infof.
 func Printf(format string, args ...interface{}) {
 	logger.Infof(format, args...)
 }
 
 // Println prints to the logger. Arguments are handled in the manner of fmt.Println.
+//
 // Deprecated: use Infoln.
 func Println(args ...interface{}) {
 	logger.Infoln(args...)

2  vendor/google.golang.org/grpc/grpclog/logger.go  (generated, vendored)

@@ -19,6 +19,7 @@
 package grpclog
 
 // Logger mimics golang's standard Logger as an interface.
+//
 // Deprecated: use LoggerV2.
 type Logger interface {
 	Fatal(args ...interface{})
@@ -31,6 +32,7 @@ type Logger interface {
 
 // SetLogger sets the logger that is used in grpc. Call only from
 // init() functions.
+//
 // Deprecated: use SetLoggerV2.
 func SetLogger(l Logger) {
 	logger = &loggerWrapper{Logger: l}

91  vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go  (generated, vendored)

@@ -1,17 +1,7 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
 // source: grpc_health_v1/health.proto
 
-/*
-Package grpc_health_v1 is a generated protocol buffer package.
-
-It is generated from these files:
-	grpc_health_v1/health.proto
-
-It has these top-level messages:
-	HealthCheckRequest
-	HealthCheckResponse
-*/
-package grpc_health_v1
+package grpc_health_v1 // import "google.golang.org/grpc/health/grpc_health_v1"
 
 import proto "github.com/golang/protobuf/proto"
 import fmt "fmt"
@@ -56,17 +46,39 @@ func (x HealthCheckResponse_ServingStatus) String() string {
 	return proto.EnumName(HealthCheckResponse_ServingStatus_name, int32(x))
 }
 func (HealthCheckResponse_ServingStatus) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor0, []int{1, 0}
+	return fileDescriptor_health_8e5b8a3074428511, []int{1, 0}
 }
 
 type HealthCheckRequest struct {
 	Service string `protobuf:"bytes,1,opt,name=service" json:"service,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
 func (m *HealthCheckRequest) Reset()         { *m = HealthCheckRequest{} }
 func (m *HealthCheckRequest) String() string { return proto.CompactTextString(m) }
 func (*HealthCheckRequest) ProtoMessage()    {}
-func (*HealthCheckRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+func (*HealthCheckRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_health_8e5b8a3074428511, []int{0}
+}
+func (m *HealthCheckRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_HealthCheckRequest.Unmarshal(m, b)
+}
+func (m *HealthCheckRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_HealthCheckRequest.Marshal(b, m, deterministic)
+}
+func (dst *HealthCheckRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_HealthCheckRequest.Merge(dst, src)
+}
+func (m *HealthCheckRequest) XXX_Size() int {
+	return xxx_messageInfo_HealthCheckRequest.Size(m)
+}
+func (m *HealthCheckRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_HealthCheckRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_HealthCheckRequest proto.InternalMessageInfo
+
 func (m *HealthCheckRequest) GetService() string {
 	if m != nil {
@@ -76,13 +88,35 @@ func (m *HealthCheckRequest) GetService() string {
 }
 
 type HealthCheckResponse struct {
 	Status HealthCheckResponse_ServingStatus `protobuf:"varint,1,opt,name=status,enum=grpc.health.v1.HealthCheckResponse_ServingStatus" json:"status,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
 func (m *HealthCheckResponse) Reset()         { *m = HealthCheckResponse{} }
 func (m *HealthCheckResponse) String() string { return proto.CompactTextString(m) }
 func (*HealthCheckResponse) ProtoMessage()    {}
-func (*HealthCheckResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
+func (*HealthCheckResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_health_8e5b8a3074428511, []int{1}
+}
+func (m *HealthCheckResponse) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_HealthCheckResponse.Unmarshal(m, b)
+}
+func (m *HealthCheckResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_HealthCheckResponse.Marshal(b, m, deterministic)
+}
+func (dst *HealthCheckResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_HealthCheckResponse.Merge(dst, src)
+}
+func (m *HealthCheckResponse) XXX_Size() int {
+	return xxx_messageInfo_HealthCheckResponse.Size(m)
+}
+func (m *HealthCheckResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_HealthCheckResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_HealthCheckResponse proto.InternalMessageInfo
+
 func (m *HealthCheckResponse) GetStatus() HealthCheckResponse_ServingStatus {
 	if m != nil {
@@ -169,10 +203,10 @@ var _Health_serviceDesc = grpc.ServiceDesc{
 	Metadata: "grpc_health_v1/health.proto",
 }
 
-func init() { proto.RegisterFile("grpc_health_v1/health.proto", fileDescriptor0) }
+func init() { proto.RegisterFile("grpc_health_v1/health.proto", fileDescriptor_health_8e5b8a3074428511) }
 
-var fileDescriptor0 = []byte{
-	// 213 bytes of a gzipped FileDescriptorProto
+var fileDescriptor_health_8e5b8a3074428511 = []byte{
+	// 269 bytes of a gzipped FileDescriptorProto
 	[... three unchanged fileDescriptor byte lines omitted ...]
@@ -185,6 +219,9 @@ var fileDescriptor0 = []byte{
 	[... remaining fileDescriptor byte values changed; hex lines omitted ...]
 }

22  vendor/google.golang.org/grpc/health/grpc_health_v1/health.proto  (generated, vendored)

@@ -1,4 +1,5 @@
-// Copyright 2017 gRPC authors.
+// Copyright 2015, gRPC Authors
+// All rights reserved.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -12,23 +13,32 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+// The canonical version of this proto can be found at
+// https://github.com/grpc/grpc-proto/blob/master/grpc/health/v1/health.proto
+
 syntax = "proto3";
 
 package grpc.health.v1;
 
+option csharp_namespace = "Grpc.Health.V1";
+option go_package = "google.golang.org/grpc/health/grpc_health_v1";
+option java_multiple_files = true;
+option java_outer_classname = "HealthProto";
+option java_package = "io.grpc.health.v1";
+
 message HealthCheckRequest {
   string service = 1;
 }
 
 message HealthCheckResponse {
   enum ServingStatus {
     UNKNOWN = 0;
     SERVING = 1;
    NOT_SERVING = 2;
   }
   ServingStatus status = 1;
 }
 
-service Health{
+service Health {
   rpc Check(HealthCheckRequest) returns (HealthCheckResponse);
 }
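
For context on the two files above, a minimal client-side sketch of calling the generated health Check RPC. This is not part of the diff; the address and timeout are placeholders:

package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

func main() {
	conn, err := grpc.Dial("localhost:50051", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	// An empty Service name asks about the server's overall health.
	resp, err := healthpb.NewHealthClient(conn).Check(ctx, &healthpb.HealthCheckRequest{Service: ""})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("status:", resp.Status) // e.g. SERVING
}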

2  vendor/google.golang.org/grpc/health/health.go  (generated, vendored)

@@ -16,7 +16,7 @@
  *
  */
 
-//go:generate protoc --go_out=plugins=grpc:. grpc_health_v1/health.proto
+//go:generate protoc --go_out=plugins=grpc,paths=source_relative:. grpc_health_v1/health.proto
 
 // Package health provides some utility functions to health-check a server. The implementation
 // is based on protobuf. Users need to write their own implementations if other IDLs are used.

4  vendor/google.golang.org/grpc/interceptor.go  (generated, vendored)

@@ -48,7 +48,9 @@ type UnaryServerInfo struct {
 }
 
 // UnaryHandler defines the handler invoked by UnaryServerInterceptor to complete the normal
-// execution of a unary RPC.
+// execution of a unary RPC. If a UnaryHandler returns an error, it should be produced by the
+// status package, or else gRPC will use codes.Unknown as the status code and err.Error() as
+// the status message of the RPC.
 type UnaryHandler func(ctx context.Context, req interface{}) (interface{}, error)
 
 // UnaryServerInterceptor provides a hook to intercept the execution of a unary RPC on the server. info
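
The clarified UnaryHandler doc means handlers should return status-package errors when they want a specific code. A self-contained sketch of that behaviour (the handler body and values are made up for illustration):

package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// handler has the UnaryHandler shape described above.
func handler(ctx context.Context, req interface{}) (interface{}, error) {
	id, ok := req.(string)
	if !ok || id == "" {
		// Surfaces to the client as codes.InvalidArgument with this message.
		return nil, status.Errorf(codes.InvalidArgument, "missing id")
	}
	// Returning a plain error here would instead become codes.Unknown with
	// err.Error() as the status message, as the new comment explains.
	return "thing:" + id, nil
}

func main() {
	_, err := handler(context.Background(), "")
	fmt.Println(status.Code(err), err)
}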

36  vendor/google.golang.org/grpc/metadata/metadata.go  (generated, vendored)

@@ -28,7 +28,9 @@ import (
 	"golang.org/x/net/context"
 )
 
-// DecodeKeyValue returns k, v, nil. It is deprecated and should not be used.
+// DecodeKeyValue returns k, v, nil.
+//
+// Deprecated: use k and v directly instead.
 func DecodeKeyValue(k, v string) (string, string, error) {
 	return k, v, nil
 }
@@ -95,6 +97,30 @@ func (md MD) Copy() MD {
 	return Join(md)
 }
 
+// Get obtains the values for a given key.
+func (md MD) Get(k string) []string {
+	k = strings.ToLower(k)
+	return md[k]
+}
+
+// Set sets the value of a given key with a slice of values.
+func (md MD) Set(k string, vals ...string) {
+	if len(vals) == 0 {
+		return
+	}
+	k = strings.ToLower(k)
+	md[k] = vals
+}
+
+// Append adds the values to key k, not overwriting what was already stored at that key.
+func (md MD) Append(k string, vals ...string) {
+	if len(vals) == 0 {
+		return
+	}
+	k = strings.ToLower(k)
+	md[k] = append(md[k], vals...)
+}
+
 // Join joins any number of mds into a single MD.
 // The order of values for each key is determined by the order in which
 // the mds containing those values are presented to Join.
@@ -131,7 +157,11 @@ func AppendToOutgoingContext(ctx context.Context, kv ...string) context.Context
 		panic(fmt.Sprintf("metadata: AppendToOutgoingContext got an odd number of input pairs for metadata: %d", len(kv)))
 	}
 	md, _ := ctx.Value(mdOutgoingKey{}).(rawMD)
-	return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md.md, added: append(md.added, kv)})
+	added := make([][]string, len(md.added)+1)
+	copy(added, md.added)
+	added[len(added)-1] = make([]string, len(kv))
+	copy(added[len(added)-1], kv)
+	return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md.md, added: added})
 }
 
 // FromIncomingContext returns the incoming metadata in ctx if it exists. The
@@ -159,7 +189,7 @@ func FromOutgoingContextRaw(ctx context.Context) (MD, [][]string, bool) {
 
 // FromOutgoingContext returns the outgoing metadata in ctx if it exists. The
 // returned MD should not be modified. Writing to it may cause races.
-// Modification should be made to the copies of the returned MD.
+// Modification should be made to copies of the returned MD.
 func FromOutgoingContext(ctx context.Context) (MD, bool) {
 	raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD)
 	if !ok {
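
A short sketch of the new MD helpers and of AppendToOutgoingContext, which now copies the added slices instead of aliasing the caller's backing array. Not part of the diff; keys and values are arbitrary examples:

package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/metadata"
)

func main() {
	md := metadata.Pairs("authorization", "Bearer abc")
	md.Set("x-trace-id", "123")       // replaces any existing values
	md.Append("x-trace-id", "456")    // keeps existing values
	fmt.Println(md.Get("x-trace-id")) // [123 456]; keys are lowercased

	ctx := metadata.NewOutgoingContext(context.Background(), md)
	ctx = metadata.AppendToOutgoingContext(ctx, "x-extra", "1")
	_ = ctx
}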

6  vendor/google.golang.org/grpc/naming/dns_resolver.go  (generated, vendored)

@@ -153,10 +153,10 @@ type ipWatcher struct {
 	updateChan chan *Update
 }
 
-// Next returns the adrress resolution Update for the target. For IP address,
-// the resolution is itself, thus polling name server is unncessary. Therefore,
+// Next returns the address resolution Update for the target. For IP address,
+// the resolution is itself, thus polling name server is unnecessary. Therefore,
 // Next() will return an Update the first time it is called, and will be blocked
-// for all following calls as no Update exisits until watcher is closed.
+// for all following calls as no Update exists until watcher is closed.
 func (i *ipWatcher) Next() ([]*Update, error) {
 	u, ok := <-i.updateChan
 	if !ok {

12  vendor/google.golang.org/grpc/naming/naming.go  (generated, vendored)

@@ -18,20 +18,26 @@
 
 // Package naming defines the naming API and related data structures for gRPC.
 // The interface is EXPERIMENTAL and may be suject to change.
+//
+// Deprecated: please use package resolver.
 package naming
 
 // Operation defines the corresponding operations for a name resolution change.
+//
+// Deprecated: please use package resolver.
 type Operation uint8
 
 const (
 	// Add indicates a new address is added.
 	Add Operation = iota
-	// Delete indicates an exisiting address is deleted.
+	// Delete indicates an existing address is deleted.
 	Delete
 )
 
 // Update defines a name resolution update. Notice that it is not valid having both
 // empty string Addr and nil Metadata in an Update.
+//
+// Deprecated: please use package resolver.
 type Update struct {
 	// Op indicates the operation of the update.
 	Op Operation
@@ -43,12 +49,16 @@ type Update struct {
 }
 
 // Resolver creates a Watcher for a target to track its resolution changes.
+//
+// Deprecated: please use package resolver.
 type Resolver interface {
 	// Resolve creates a Watcher for target.
 	Resolve(target string) (Watcher, error)
 }
 
 // Watcher watches for the updates on the specified target.
+//
+// Deprecated: please use package resolver.
 type Watcher interface {
 	// Next blocks until an update or error happens. It may return one or more
 	// updates. The first call should get the full set of the results. It should

194  vendor/google.golang.org/grpc/picker_wrapper.go  (generated, vendored)

@@ -19,12 +19,17 @@
 package grpc
 
 import (
+	"io"
 	"sync"
+	"sync/atomic"
 
 	"golang.org/x/net/context"
 	"google.golang.org/grpc/balancer"
+	"google.golang.org/grpc/channelz"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/resolver"
 	"google.golang.org/grpc/status"
 	"google.golang.org/grpc/transport"
 )
@@ -36,13 +41,57 @@ type pickerWrapper struct {
 	done       bool
 	blockingCh chan struct{}
 	picker     balancer.Picker
+
+	// The latest connection happened.
+	connErrMu sync.Mutex
+	connErr   error
+
+	stickinessMDKey atomic.Value
+	stickiness      *stickyStore
 }
 
 func newPickerWrapper() *pickerWrapper {
-	bp := &pickerWrapper{blockingCh: make(chan struct{})}
+	bp := &pickerWrapper{
+		blockingCh: make(chan struct{}),
+		stickiness: newStickyStore(),
+	}
 	return bp
 }
 
+func (bp *pickerWrapper) updateConnectionError(err error) {
+	bp.connErrMu.Lock()
+	bp.connErr = err
+	bp.connErrMu.Unlock()
+}
+
+func (bp *pickerWrapper) connectionError() error {
+	bp.connErrMu.Lock()
+	err := bp.connErr
+	bp.connErrMu.Unlock()
+	return err
+}
+
+func (bp *pickerWrapper) updateStickinessMDKey(newKey string) {
+	// No need to check ok because mdKey == "" if ok == false.
+	if oldKey, _ := bp.stickinessMDKey.Load().(string); oldKey != newKey {
+		bp.stickinessMDKey.Store(newKey)
+		bp.stickiness.reset(newKey)
+	}
+}
+
+func (bp *pickerWrapper) getStickinessMDKey() string {
+	// No need to check ok because mdKey == "" if ok == false.
+	mdKey, _ := bp.stickinessMDKey.Load().(string)
+	return mdKey
+}
+
+func (bp *pickerWrapper) clearStickinessState() {
+	if oldKey := bp.getStickinessMDKey(); oldKey != "" {
+		// There's no need to reset store if mdKey was "".
+		bp.stickiness.reset(oldKey)
+	}
+}
+
 // updatePicker is called by UpdateBalancerState. It unblocks all blocked pick.
 func (bp *pickerWrapper) updatePicker(p balancer.Picker) {
 	bp.mu.Lock()
@@ -57,6 +106,23 @@ func (bp *pickerWrapper) updatePicker(p balancer.Picker) {
 	bp.mu.Unlock()
 }
 
+func doneChannelzWrapper(acw *acBalancerWrapper, done func(balancer.DoneInfo)) func(balancer.DoneInfo) {
+	acw.mu.Lock()
+	ac := acw.ac
+	acw.mu.Unlock()
+	ac.incrCallsStarted()
+	return func(b balancer.DoneInfo) {
+		if b.Err != nil && b.Err != io.EOF {
+			ac.incrCallsFailed()
+		} else {
+			ac.incrCallsSucceeded()
+		}
+		if done != nil {
+			done(b)
+		}
+	}
+}
+
 // pick returns the transport that will be used for the RPC.
 // It may block in the following cases:
 // - there's no picker
@@ -65,6 +131,27 @@ func (bp *pickerWrapper) updatePicker(p balancer.Picker) {
 // - the subConn returned by the current picker is not READY
 // When one of these situations happens, pick blocks until the picker gets updated.
 func (bp *pickerWrapper) pick(ctx context.Context, failfast bool, opts balancer.PickOptions) (transport.ClientTransport, func(balancer.DoneInfo), error) {
+
+	mdKey := bp.getStickinessMDKey()
+	stickyKey, isSticky := stickyKeyFromContext(ctx, mdKey)
+
+	// Potential race here: if stickinessMDKey is updated after the above two
+	// lines, and this pick is a sticky pick, the following put could add an
+	// entry to sticky store with an outdated sticky key.
+	//
+	// The solution: keep the current md key in sticky store, and at the
+	// beginning of each get/put, check the mdkey against store.curMDKey.
+	// - Cons: one more string comparing for each get/put.
+	// - Pros: the string matching happens inside get/put, so the overhead for
+	//   non-sticky RPCs will be minimal.
+
+	if isSticky {
+		if t, ok := bp.stickiness.get(mdKey, stickyKey); ok {
+			// Done function returned is always nil.
+			return t, nil, nil
+		}
+	}
+
 	var (
 		p  balancer.Picker
 		ch chan struct{}
@@ -107,7 +194,7 @@ func (bp *pickerWrapper) pick(ctx context.Context, failfast bool, opts balancer.
 			if !failfast {
 				continue
 			}
-			return nil, nil, status.Errorf(codes.Unavailable, "%v", err)
+			return nil, nil, status.Errorf(codes.Unavailable, "%v, latest connection error: %v", err, bp.connectionError())
 		default:
 			// err is some other error.
 			return nil, nil, toRPCErr(err)
@@ -120,6 +207,12 @@ func (bp *pickerWrapper) pick(ctx context.Context, failfast bool, opts balancer.
 			continue
 		}
 		if t, ok := acw.getAddrConn().getReadyTransport(); ok {
+			if isSticky {
+				bp.stickiness.put(mdKey, stickyKey, acw)
+			}
+			if channelz.IsOn() {
+				return t, doneChannelzWrapper(acw, done), nil
+			}
 			return t, done, nil
 		}
 		grpclog.Infof("blockingPicker: the picked transport is not ready, loop back to repick")
@@ -139,3 +232,100 @@ func (bp *pickerWrapper) close() {
 	bp.done = true
 	close(bp.blockingCh)
 }
+
+type stickyStoreEntry struct {
+	acw  *acBalancerWrapper
+	addr resolver.Address
+}
+
+type stickyStore struct {
+	mu sync.Mutex
+	// curMDKey is check before every get/put to avoid races. The operation will
+	// abort immediately when the given mdKey is different from the curMDKey.
+	curMDKey string
+	store    map[string]*stickyStoreEntry
+}
+
+func newStickyStore() *stickyStore {
+	return &stickyStore{
+		store: make(map[string]*stickyStoreEntry),
+	}
+}
+
+// reset clears the map in stickyStore, and set the currentMDKey to newMDKey.
+func (ss *stickyStore) reset(newMDKey string) {
+	ss.mu.Lock()
+	ss.curMDKey = newMDKey
+	ss.store = make(map[string]*stickyStoreEntry)
+	ss.mu.Unlock()
+}
+
+// stickyKey is the key to look up in store. mdKey will be checked against
+// curMDKey to avoid races.
+func (ss *stickyStore) put(mdKey, stickyKey string, acw *acBalancerWrapper) {
+	ss.mu.Lock()
+	defer ss.mu.Unlock()
+	if mdKey != ss.curMDKey {
+		return
+	}
+	// TODO(stickiness): limit the total number of entries.
+	ss.store[stickyKey] = &stickyStoreEntry{
+		acw:  acw,
+		addr: acw.getAddrConn().getCurAddr(),
+	}
+}
+
+// stickyKey is the key to look up in store. mdKey will be checked against
+// curMDKey to avoid races.
+func (ss *stickyStore) get(mdKey, stickyKey string) (transport.ClientTransport, bool) {
+	ss.mu.Lock()
+	defer ss.mu.Unlock()
+	if mdKey != ss.curMDKey {
+		return nil, false
+	}
+	entry, ok := ss.store[stickyKey]
+	if !ok {
+		return nil, false
+	}
+	ac := entry.acw.getAddrConn()
+	if ac.getCurAddr() != entry.addr {
+		delete(ss.store, stickyKey)
+		return nil, false
+	}
+	t, ok := ac.getReadyTransport()
+	if !ok {
+		delete(ss.store, stickyKey)
+		return nil, false
+	}
+	return t, true
+}
+
+// Get one value from metadata in ctx with key stickinessMDKey.
+//
+// It returns "", false if stickinessMDKey is an empty string.
+func stickyKeyFromContext(ctx context.Context, stickinessMDKey string) (string, bool) {
+	if stickinessMDKey == "" {
+		return "", false
+	}
+
+	md, added, ok := metadata.FromOutgoingContextRaw(ctx)
+	if !ok {
+		return "", false
+	}
+
+	if vv, ok := md[stickinessMDKey]; ok {
+		if len(vv) > 0 {
+			return vv[0], true
+		}
+	}
+
+	for _, ss := range added {
+		for i := 0; i < len(ss)-1; i += 2 {
+			if ss[i] == stickinessMDKey {
+				return ss[i+1], true
+			}
+		}
+	}
+
+	return "", false
+}
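
The stickinessMDKey field above is a read-mostly string kept in an atomic.Value, loaded with a two-value type assertion so it reads as "" before the first Store. A tiny standalone sketch of that pattern (illustrative only, not part of the diff):

package main

import (
	"fmt"
	"sync/atomic"
)

type keyHolder struct{ v atomic.Value }

func (k *keyHolder) load() string {
	// Load returns nil before the first Store; the two-value assertion
	// turns that into the empty string.
	s, _ := k.v.Load().(string)
	return s
}

func (k *keyHolder) store(s string) { k.v.Store(s) }

func main() {
	var k keyHolder
	fmt.Printf("%q\n", k.load()) // ""
	k.store("x-session-id")
	fmt.Printf("%q\n", k.load()) // "x-session-id"
}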

44  vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go  (generated, vendored)

@@ -50,7 +50,10 @@ const (
 	txtAttribute = "grpc_config="
 )
 
-var errMissingAddr = errors.New("missing address")
+var (
+	errMissingAddr = errors.New("missing address")
+	randomGen      = rand.New(rand.NewSource(time.Now().UnixNano()))
+)
 
 // NewBuilder creates a dnsBuilder which is used to factory DNS resolvers.
 func NewBuilder() resolver.Builder {
@@ -87,14 +90,15 @@ func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts
 	// DNS address (non-IP).
 	ctx, cancel := context.WithCancel(context.Background())
 	d := &dnsResolver{
 		freq:   b.freq,
 		host:   host,
 		port:   port,
 		ctx:    ctx,
 		cancel: cancel,
 		cc:     cc,
 		t:      time.NewTimer(0),
 		rn:     make(chan struct{}, 1),
+		disableServiceConfig: opts.DisableServiceConfig,
 	}
 
 	d.wg.Add(1)
@@ -157,7 +161,8 @@ type dnsResolver struct {
 	// If Close() doesn't wait for watcher() goroutine finishes, race detector sometimes
 	// will warns lookup (READ the lookup function pointers) inside watcher() goroutine
 	// has data race with replaceNetFunc (WRITE the lookup function pointers).
 	wg sync.WaitGroup
+	disableServiceConfig bool
 }
 
 // ResolveNow invoke an immediate resolution of the target that this dnsResolver watches.
@@ -187,7 +192,7 @@ func (d *dnsResolver) watcher() {
 		result, sc := d.lookup()
 		// Next lookup should happen after an interval defined by d.freq.
 		d.t.Reset(d.freq)
-		d.cc.NewServiceConfig(string(sc))
+		d.cc.NewServiceConfig(sc)
 		d.cc.NewAddress(result)
 	}
 }
@@ -202,7 +207,7 @@ func (d *dnsResolver) lookupSRV() []resolver.Address {
 	for _, s := range srvs {
 		lbAddrs, err := lookupHost(d.ctx, s.Target)
 		if err != nil {
-			grpclog.Warningf("grpc: failed load banlacer address dns lookup due to %v.\n", err)
+			grpclog.Infof("grpc: failed load balancer address dns lookup due to %v.\n", err)
 			continue
 		}
 		for _, a := range lbAddrs {
@@ -221,7 +226,7 @@ func (d *dnsResolver) lookupSRV() []resolver.Address {
 func (d *dnsResolver) lookupTXT() string {
 	ss, err := lookupTXT(d.ctx, d.host)
 	if err != nil {
-		grpclog.Warningf("grpc: failed dns TXT record lookup due to %v.\n", err)
+		grpclog.Infof("grpc: failed dns TXT record lookup due to %v.\n", err)
 		return ""
 	}
 	var res string
@@ -257,10 +262,12 @@ func (d *dnsResolver) lookupHost() []resolver.Address {
 }
 
 func (d *dnsResolver) lookup() ([]resolver.Address, string) {
-	var newAddrs []resolver.Address
-	newAddrs = d.lookupSRV()
+	newAddrs := d.lookupSRV()
 	// Support fallback to non-balancer address.
 	newAddrs = append(newAddrs, d.lookupHost()...)
+	if d.disableServiceConfig {
+		return newAddrs, ""
+	}
 	sc := d.lookupTXT()
 	return newAddrs, canaryingSC(sc)
 }
@@ -339,12 +346,7 @@ func chosenByPercentage(a *int) bool {
 	if a == nil {
 		return true
 	}
-	s := rand.NewSource(time.Now().UnixNano())
-	r := rand.New(s)
-	if r.Intn(100)+1 > *a {
-		return false
-	}
-	return true
+	return randomGen.Intn(100)+1 <= *a
 }
 
 func canaryingSC(js string) string {
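
The randomGen change above swaps per-call re-seeding for one package-level generator that is seeded once. A self-contained sketch of the same idea, mirroring the vendored chosenByPercentage (the 30% value is an arbitrary example):

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// One seeded generator reused across calls, as the resolver now does.
var randomGen = rand.New(rand.NewSource(time.Now().UnixNano()))

func chosenByPercentage(a *int) bool {
	if a == nil {
		return true
	}
	return randomGen.Intn(100)+1 <= *a
}

func main() {
	p := 30
	fmt.Println(chosenByPercentage(&p))
}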

10  vendor/google.golang.org/grpc/resolver/resolver.go  (generated, vendored)

@@ -29,8 +29,12 @@ var (
 
 // TODO(bar) install dns resolver in init(){}.
 
-// Register registers the resolver builder to the resolver map.
-// b.Scheme will be used as the scheme registered with this builder.
+// Register registers the resolver builder to the resolver map. b.Scheme will be
+// used as the scheme registered with this builder.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple Resolvers are
+// registered with the same name, the one registered last will take effect.
 func Register(b Builder) {
 	m[b.Scheme()] = b
 }
@@ -86,6 +90,8 @@ type Address struct {
 // BuildOption includes additional information for the builder to create
 // the resolver.
 type BuildOption struct {
+	// DisableServiceConfig indicates whether resolver should fetch service config data.
+	DisableServiceConfig bool
 }
 
 // ClientConn contains the callbacks for resolver to notify any updates
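
The new NOTE on Register spells out the contract a custom resolver has to follow. A hedged sketch of registering a builder from init(), assuming the Builder/Resolver API of this gRPC version (BuildOption, ResolveNowOption); the "static" scheme and the address are made up for illustration:

package example

import "google.golang.org/grpc/resolver"

type staticBuilder struct{ addrs []resolver.Address }

func (b *staticBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOption) (resolver.Resolver, error) {
	// opts.DisableServiceConfig could be consulted here before fetching config.
	cc.NewAddress(b.addrs)
	return &staticResolver{}, nil
}
func (b *staticBuilder) Scheme() string { return "static" }

type staticResolver struct{}

func (*staticResolver) ResolveNow(resolver.ResolveNowOption) {}
func (*staticResolver) Close()                               {}

func init() {
	// Register must only be called from init(), per the clarified comment above.
	resolver.Register(&staticBuilder{addrs: []resolver.Address{{Addr: "127.0.0.1:50051"}}})
}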

9  vendor/google.golang.org/grpc/resolver_conn_wrapper.go  (generated, vendored)

@@ -57,7 +57,10 @@ func parseTarget(target string) (ret resolver.Target) {
 	if !ok {
 		return resolver.Target{Endpoint: target}
 	}
-	ret.Authority, ret.Endpoint, _ = split2(ret.Endpoint, "/")
+	ret.Authority, ret.Endpoint, ok = split2(ret.Endpoint, "/")
+	if !ok {
+		return resolver.Target{Endpoint: target}
+	}
 	return ret
 }
 
@@ -81,7 +84,7 @@ func newCCResolverWrapper(cc *ClientConn) (*ccResolverWrapper, error) {
 	}
 
 	var err error
-	ccr.resolver, err = rb.Build(cc.parsedTarget, ccr, resolver.BuildOption{})
+	ccr.resolver, err = rb.Build(cc.parsedTarget, ccr, resolver.BuildOption{DisableServiceConfig: cc.dopts.disableServiceConfig})
 	if err != nil {
 		return nil, err
 	}
@@ -92,7 +95,7 @@ func (ccr *ccResolverWrapper) start() {
 	go ccr.watcher()
 }
 
-// watcher processes address updates and service config updates sequencially.
+// watcher processes address updates and service config updates sequentially.
 // Otherwise, we need to resolve possible races between address and service
 // config (e.g. they specify different balancer types).
 func (ccr *ccResolverWrapper) watcher() {
274
vendor/google.golang.org/grpc/rpc_util.go
generated
vendored
274
vendor/google.golang.org/grpc/rpc_util.go
generated
vendored
@ -22,9 +22,11 @@ import (
|
|||||||
"bytes"
|
"bytes"
|
||||||
"compress/gzip"
|
"compress/gzip"
|
||||||
"encoding/binary"
|
"encoding/binary"
|
||||||
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"math"
|
"math"
|
||||||
|
"net/url"
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
@ -42,6 +44,8 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
// Compressor defines the interface gRPC uses to compress a message.
|
// Compressor defines the interface gRPC uses to compress a message.
|
||||||
|
//
|
||||||
|
// Deprecated: use package encoding.
|
||||||
type Compressor interface {
|
type Compressor interface {
|
||||||
// Do compresses p into w.
|
// Do compresses p into w.
|
||||||
Do(w io.Writer, p []byte) error
|
Do(w io.Writer, p []byte) error
|
||||||
@ -54,14 +58,34 @@ type gzipCompressor struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// NewGZIPCompressor creates a Compressor based on GZIP.
|
// NewGZIPCompressor creates a Compressor based on GZIP.
|
||||||
|
//
|
||||||
|
// Deprecated: use package encoding/gzip.
|
||||||
func NewGZIPCompressor() Compressor {
|
func NewGZIPCompressor() Compressor {
|
||||||
|
c, _ := NewGZIPCompressorWithLevel(gzip.DefaultCompression)
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewGZIPCompressorWithLevel is like NewGZIPCompressor but specifies the gzip compression level instead
|
||||||
|
// of assuming DefaultCompression.
|
||||||
|
//
|
||||||
|
// The error returned will be nil if the level is valid.
|
||||||
|
//
|
||||||
|
// Deprecated: use package encoding/gzip.
|
||||||
|
func NewGZIPCompressorWithLevel(level int) (Compressor, error) {
|
||||||
|
if level < gzip.DefaultCompression || level > gzip.BestCompression {
|
||||||
|
return nil, fmt.Errorf("grpc: invalid compression level: %d", level)
|
||||||
|
}
|
||||||
return &gzipCompressor{
|
return &gzipCompressor{
|
||||||
pool: sync.Pool{
|
pool: sync.Pool{
|
||||||
New: func() interface{} {
|
New: func() interface{} {
|
||||||
return gzip.NewWriter(ioutil.Discard)
|
w, err := gzip.NewWriterLevel(ioutil.Discard, level)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return w
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *gzipCompressor) Do(w io.Writer, p []byte) error {
|
func (c *gzipCompressor) Do(w io.Writer, p []byte) error {
|
||||||
@ -79,6 +103,8 @@ func (c *gzipCompressor) Type() string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Decompressor defines the interface gRPC uses to decompress a message.
|
// Decompressor defines the interface gRPC uses to decompress a message.
|
||||||
|
//
|
||||||
|
// Deprecated: use package encoding.
|
||||||
type Decompressor interface {
|
type Decompressor interface {
|
||||||
// Do reads the data from r and uncompress them.
|
// Do reads the data from r and uncompress them.
|
||||||
Do(r io.Reader) ([]byte, error)
|
Do(r io.Reader) ([]byte, error)
|
||||||
@ -91,6 +117,8 @@ type gzipDecompressor struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// NewGZIPDecompressor creates a Decompressor based on GZIP.
|
// NewGZIPDecompressor creates a Decompressor based on GZIP.
|
||||||
|
//
|
||||||
|
// Deprecated: use package encoding/gzip.
|
||||||
func NewGZIPDecompressor() Decompressor {
|
func NewGZIPDecompressor() Decompressor {
|
||||||
return &gzipDecompressor{}
|
return &gzipDecompressor{}
|
||||||
}
|
}
|
||||||
@ -127,7 +155,7 @@ func (d *gzipDecompressor) Type() string {
|
|||||||
type callInfo struct {
|
type callInfo struct {
|
||||||
compressorType string
|
compressorType string
|
||||||
failFast bool
|
failFast bool
|
||||||
stream *transport.Stream
|
stream *clientStream
|
||||||
traceInfo traceInfo // in trace.go
|
traceInfo traceInfo // in trace.go
|
||||||
maxReceiveMessageSize *int
|
maxReceiveMessageSize *int
|
||||||
maxSendMessageSize *int
|
maxSendMessageSize *int
|
||||||
@ -160,46 +188,66 @@ type EmptyCallOption struct{}
|
|||||||
func (EmptyCallOption) before(*callInfo) error { return nil }
|
func (EmptyCallOption) before(*callInfo) error { return nil }
|
||||||
func (EmptyCallOption) after(*callInfo) {}
|
func (EmptyCallOption) after(*callInfo) {}
|
||||||
|
|
||||||
type beforeCall func(c *callInfo) error
|
|
||||||
|
|
||||||
func (o beforeCall) before(c *callInfo) error { return o(c) }
|
|
||||||
func (o beforeCall) after(c *callInfo) {}
|
|
||||||
|
|
||||||
type afterCall func(c *callInfo)
|
|
||||||
|
|
||||||
func (o afterCall) before(c *callInfo) error { return nil }
|
|
||||||
func (o afterCall) after(c *callInfo) { o(c) }
|
|
||||||
|
|
||||||
// Header returns a CallOptions that retrieves the header metadata
|
// Header returns a CallOptions that retrieves the header metadata
|
||||||
// for a unary RPC.
|
// for a unary RPC.
|
||||||
func Header(md *metadata.MD) CallOption {
|
func Header(md *metadata.MD) CallOption {
|
||||||
return afterCall(func(c *callInfo) {
|
return HeaderCallOption{HeaderAddr: md}
|
||||||
if c.stream != nil {
|
}
|
||||||
*md, _ = c.stream.Header()
|
|
||||||
}
|
// HeaderCallOption is a CallOption for collecting response header metadata.
|
||||||
})
|
// The metadata field will be populated *after* the RPC completes.
|
||||||
|
// This is an EXPERIMENTAL API.
|
||||||
|
type HeaderCallOption struct {
|
||||||
|
HeaderAddr *metadata.MD
|
||||||
|
}
|
||||||
|
|
||||||
|
func (o HeaderCallOption) before(c *callInfo) error { return nil }
|
||||||
|
func (o HeaderCallOption) after(c *callInfo) {
|
||||||
|
if c.stream != nil {
|
||||||
|
*o.HeaderAddr, _ = c.stream.Header()
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Trailer returns a CallOptions that retrieves the trailer metadata
|
// Trailer returns a CallOptions that retrieves the trailer metadata
|
||||||
// for a unary RPC.
|
// for a unary RPC.
|
||||||
func Trailer(md *metadata.MD) CallOption {
|
func Trailer(md *metadata.MD) CallOption {
|
||||||
return afterCall(func(c *callInfo) {
|
return TrailerCallOption{TrailerAddr: md}
|
||||||
if c.stream != nil {
|
|
||||||
*md = c.stream.Trailer()
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Peer returns a CallOption that retrieves peer information for a
|
// TrailerCallOption is a CallOption for collecting response trailer metadata.
|
||||||
// unary RPC.
|
// The metadata field will be populated *after* the RPC completes.
|
||||||
|
// This is an EXPERIMENTAL API.
|
||||||
|
type TrailerCallOption struct {
|
||||||
|
TrailerAddr *metadata.MD
|
||||||
|
}
|
||||||
|
|
||||||
|
func (o TrailerCallOption) before(c *callInfo) error { return nil }
|
||||||
|
func (o TrailerCallOption) after(c *callInfo) {
|
||||||
|
if c.stream != nil {
|
||||||
|
*o.TrailerAddr = c.stream.Trailer()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Peer returns a CallOption that retrieves peer information for a unary RPC.
|
||||||
|
// The peer field will be populated *after* the RPC completes.
|
||||||
func Peer(p *peer.Peer) CallOption {
|
func Peer(p *peer.Peer) CallOption {
|
||||||
return afterCall(func(c *callInfo) {
|
return PeerCallOption{PeerAddr: p}
|
||||||
if c.stream != nil {
|
}
|
||||||
if x, ok := peer.FromContext(c.stream.Context()); ok {
|
|
||||||
*p = *x
|
// PeerCallOption is a CallOption for collecting the identity of the remote
|
||||||
}
|
// peer. The peer field will be populated *after* the RPC completes.
|
||||||
|
// This is an EXPERIMENTAL API.
|
||||||
|
type PeerCallOption struct {
|
||||||
|
PeerAddr *peer.Peer
|
||||||
|
}
|
||||||
|
|
||||||
|
func (o PeerCallOption) before(c *callInfo) error { return nil }
|
||||||
|
func (o PeerCallOption) after(c *callInfo) {
|
||||||
|
if c.stream != nil {
|
||||||
|
if x, ok := peer.FromContext(c.stream.Context()); ok {
|
||||||
|
*o.PeerAddr = *x
|
||||||
}
|
}
|
||||||
})
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
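For reference, a minimal client-side sketch of the reworked options above. It assumes a server exposing the standard gRPC health service on localhost:50051 (the address and service choice are placeholders); the header, trailer and peer values are only populated once the call returns, as the comments on the option structs state.

    // Sketch: collecting response metadata and peer info on a unary RPC.
    package main

    import (
        "context"
        "fmt"
        "log"

        "google.golang.org/grpc"
        healthpb "google.golang.org/grpc/health/grpc_health_v1"
        "google.golang.org/grpc/metadata"
        "google.golang.org/grpc/peer"
    )

    func main() {
        conn, err := grpc.Dial("localhost:50051", grpc.WithInsecure()) // placeholder address
        if err != nil {
            log.Fatal(err)
        }
        defer conn.Close()

        var hdr, trl metadata.MD
        var pr peer.Peer
        // Header, Trailer and Peer now return the exported *CallOption structs
        // shown above; the targets are filled in after the RPC completes.
        _, err = healthpb.NewHealthClient(conn).Check(context.Background(),
            &healthpb.HealthCheckRequest{},
            grpc.Header(&hdr), grpc.Trailer(&trl), grpc.Peer(&pr))
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("header:", hdr, "trailer:", trl, "peer:", pr.Addr)
    }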
// FailFast configures the action to take when an RPC is attempted on broken
|
// FailFast configures the action to take when an RPC is attempted on broken
|
||||||
@ -213,49 +261,98 @@ func Peer(p *peer.Peer) CallOption {
|
|||||||
//
|
//
|
||||||
// By default, RPCs are "Fail Fast".
|
// By default, RPCs are "Fail Fast".
|
||||||
func FailFast(failFast bool) CallOption {
|
func FailFast(failFast bool) CallOption {
|
||||||
return beforeCall(func(c *callInfo) error {
|
return FailFastCallOption{FailFast: failFast}
|
||||||
c.failFast = failFast
|
|
||||||
return nil
|
|
||||||
})
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// FailFastCallOption is a CallOption for indicating whether an RPC should fail
|
||||||
|
// fast or not.
|
||||||
|
// This is an EXPERIMENTAL API.
|
||||||
|
type FailFastCallOption struct {
|
||||||
|
FailFast bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func (o FailFastCallOption) before(c *callInfo) error {
|
||||||
|
c.failFast = o.FailFast
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
func (o FailFastCallOption) after(c *callInfo) {}
|
||||||
|
|
||||||
// MaxCallRecvMsgSize returns a CallOption which sets the maximum message size the client can receive.
|
// MaxCallRecvMsgSize returns a CallOption which sets the maximum message size the client can receive.
|
||||||
func MaxCallRecvMsgSize(s int) CallOption {
|
func MaxCallRecvMsgSize(s int) CallOption {
|
||||||
return beforeCall(func(o *callInfo) error {
|
return MaxRecvMsgSizeCallOption{MaxRecvMsgSize: s}
|
||||||
o.maxReceiveMessageSize = &s
|
|
||||||
return nil
|
|
||||||
})
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// MaxRecvMsgSizeCallOption is a CallOption that indicates the maximum message
|
||||||
|
// size the client can receive.
|
||||||
|
// This is an EXPERIMENTAL API.
|
||||||
|
type MaxRecvMsgSizeCallOption struct {
|
||||||
|
MaxRecvMsgSize int
|
||||||
|
}
|
||||||
|
|
||||||
|
func (o MaxRecvMsgSizeCallOption) before(c *callInfo) error {
|
||||||
|
c.maxReceiveMessageSize = &o.MaxRecvMsgSize
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
func (o MaxRecvMsgSizeCallOption) after(c *callInfo) {}
|
||||||
|
|
||||||
// MaxCallSendMsgSize returns a CallOption which sets the maximum message size the client can send.
|
// MaxCallSendMsgSize returns a CallOption which sets the maximum message size the client can send.
|
||||||
func MaxCallSendMsgSize(s int) CallOption {
|
func MaxCallSendMsgSize(s int) CallOption {
|
||||||
return beforeCall(func(o *callInfo) error {
|
return MaxSendMsgSizeCallOption{MaxSendMsgSize: s}
|
||||||
o.maxSendMessageSize = &s
|
|
||||||
return nil
|
|
||||||
})
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// MaxSendMsgSizeCallOption is a CallOption that indicates the maximum message
|
||||||
|
// size the client can send.
|
||||||
|
// This is an EXPERIMENTAL API.
|
||||||
|
type MaxSendMsgSizeCallOption struct {
|
||||||
|
MaxSendMsgSize int
|
||||||
|
}
|
||||||
|
|
||||||
|
func (o MaxSendMsgSizeCallOption) before(c *callInfo) error {
|
||||||
|
c.maxSendMessageSize = &o.MaxSendMsgSize
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
func (o MaxSendMsgSizeCallOption) after(c *callInfo) {}
|
||||||
|
|
||||||
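One motivation for exporting these option types is that interceptors can now identify the options a call was made with instead of seeing opaque closures. A hedged sketch of a unary client interceptor doing that; the address, logging and message-size values are illustrative only.

    package main

    import (
        "context"
        "log"

        "google.golang.org/grpc"
        healthpb "google.golang.org/grpc/health/grpc_health_v1"
    )

    // inspectOptions matches the exported CallOption types introduced above.
    func inspectOptions(ctx context.Context, method string, req, reply interface{},
        cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
        for _, o := range opts {
            switch o := o.(type) {
            case grpc.FailFastCallOption:
                log.Printf("%s: fail-fast=%t", method, o.FailFast)
            case grpc.MaxRecvMsgSizeCallOption:
                log.Printf("%s: max recv msg size=%d", method, o.MaxRecvMsgSize)
            case grpc.MaxSendMsgSizeCallOption:
                log.Printf("%s: max send msg size=%d", method, o.MaxSendMsgSize)
            }
        }
        return invoker(ctx, method, req, reply, cc, opts...)
    }

    func main() {
        conn, err := grpc.Dial("localhost:50051", grpc.WithInsecure(), // placeholder address
            grpc.WithUnaryInterceptor(inspectOptions))
        if err != nil {
            log.Fatal(err)
        }
        defer conn.Close()

        _, err = healthpb.NewHealthClient(conn).Check(context.Background(),
            &healthpb.HealthCheckRequest{},
            grpc.FailFast(false), grpc.MaxCallRecvMsgSize(4<<20))
        if err != nil {
            log.Print(err)
        }
    }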
// PerRPCCredentials returns a CallOption that sets credentials.PerRPCCredentials
|
// PerRPCCredentials returns a CallOption that sets credentials.PerRPCCredentials
|
||||||
// for a call.
|
// for a call.
|
||||||
func PerRPCCredentials(creds credentials.PerRPCCredentials) CallOption {
|
func PerRPCCredentials(creds credentials.PerRPCCredentials) CallOption {
|
||||||
return beforeCall(func(c *callInfo) error {
|
return PerRPCCredsCallOption{Creds: creds}
|
||||||
c.creds = creds
|
|
||||||
return nil
|
|
||||||
})
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// PerRPCCredsCallOption is a CallOption that indicates the per-RPC
|
||||||
|
// credentials to use for the call.
|
||||||
|
// This is an EXPERIMENTAL API.
|
||||||
|
type PerRPCCredsCallOption struct {
|
||||||
|
Creds credentials.PerRPCCredentials
|
||||||
|
}
|
||||||
|
|
||||||
|
func (o PerRPCCredsCallOption) before(c *callInfo) error {
|
||||||
|
c.creds = o.Creds
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
func (o PerRPCCredsCallOption) after(c *callInfo) {}
|
||||||
|
|
||||||
// UseCompressor returns a CallOption which sets the compressor used when
|
// UseCompressor returns a CallOption which sets the compressor used when
|
||||||
// sending the request. If WithCompressor is also set, UseCompressor has
|
// sending the request. If WithCompressor is also set, UseCompressor has
|
||||||
// higher priority.
|
// higher priority.
|
||||||
//
|
//
|
||||||
// This API is EXPERIMENTAL.
|
// This API is EXPERIMENTAL.
|
||||||
func UseCompressor(name string) CallOption {
|
func UseCompressor(name string) CallOption {
|
||||||
return beforeCall(func(c *callInfo) error {
|
return CompressorCallOption{CompressorType: name}
|
||||||
c.compressorType = name
|
|
||||||
return nil
|
|
||||||
})
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// CompressorCallOption is a CallOption that indicates the compressor to use.
|
||||||
|
// This is an EXPERIMENTAL API.
|
||||||
|
type CompressorCallOption struct {
|
||||||
|
CompressorType string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (o CompressorCallOption) before(c *callInfo) error {
|
||||||
|
c.compressorType = o.CompressorType
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
func (o CompressorCallOption) after(c *callInfo) {}
|
||||||
|
|
||||||
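A small usage sketch for UseCompressor, assuming the peer also supports gzip and that the encoding/gzip subpackage is available in this grpc-go version; the blank import registers the "gzip" compressor and the dial address is a placeholder.

    package main

    import (
        "context"
        "log"

        "google.golang.org/grpc"
        _ "google.golang.org/grpc/encoding/gzip" // registers the "gzip" compressor
        healthpb "google.golang.org/grpc/health/grpc_health_v1"
    )

    func main() {
        conn, err := grpc.Dial("localhost:50051", grpc.WithInsecure()) // placeholder address
        if err != nil {
            log.Fatal(err)
        }
        defer conn.Close()

        // UseCompressor now simply returns CompressorCallOption{CompressorType: "gzip"}.
        _, err = healthpb.NewHealthClient(conn).Check(context.Background(),
            &healthpb.HealthCheckRequest{}, grpc.UseCompressor("gzip"))
        if err != nil {
            log.Fatal(err)
        }
    }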
// CallContentSubtype returns a CallOption that will set the content-subtype
|
// CallContentSubtype returns a CallOption that will set the content-subtype
|
||||||
// for a call. For example, if content-subtype is "json", the Content-Type over
|
// for a call. For example, if content-subtype is "json", the Content-Type over
|
||||||
// the wire will be "application/grpc+json". The content-subtype is converted
|
// the wire will be "application/grpc+json". The content-subtype is converted
|
||||||
@ -265,7 +362,7 @@ func UseCompressor(name string) CallOption {
|
|||||||
//
|
//
|
||||||
// If CallCustomCodec is not also used, the content-subtype will be used to
|
// If CallCustomCodec is not also used, the content-subtype will be used to
|
||||||
// look up the Codec to use in the registry controlled by RegisterCodec. See
|
// look up the Codec to use in the registry controlled by RegisterCodec. See
|
||||||
// the documention on RegisterCodec for details on registration. The lookup
|
// the documentation on RegisterCodec for details on registration. The lookup
|
||||||
// of content-subtype is case-insensitive. If no such Codec is found, the call
|
// of content-subtype is case-insensitive. If no such Codec is found, the call
|
||||||
// will result in an error with code codes.Internal.
|
// will result in an error with code codes.Internal.
|
||||||
//
|
//
|
||||||
@ -273,13 +370,22 @@ func UseCompressor(name string) CallOption {
|
|||||||
// response messages, with the content-subtype set to the given contentSubtype
|
// response messages, with the content-subtype set to the given contentSubtype
|
||||||
// here for requests.
|
// here for requests.
|
||||||
func CallContentSubtype(contentSubtype string) CallOption {
|
func CallContentSubtype(contentSubtype string) CallOption {
|
||||||
contentSubtype = strings.ToLower(contentSubtype)
|
return ContentSubtypeCallOption{ContentSubtype: strings.ToLower(contentSubtype)}
|
||||||
return beforeCall(func(c *callInfo) error {
|
|
||||||
c.contentSubtype = contentSubtype
|
|
||||||
return nil
|
|
||||||
})
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ContentSubtypeCallOption is a CallOption that indicates the content-subtype
|
||||||
|
// used for marshaling messages.
|
||||||
|
// This is an EXPERIMENTAL API.
|
||||||
|
type ContentSubtypeCallOption struct {
|
||||||
|
ContentSubtype string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (o ContentSubtypeCallOption) before(c *callInfo) error {
|
||||||
|
c.contentSubtype = o.ContentSubtype
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
func (o ContentSubtypeCallOption) after(c *callInfo) {}
|
||||||
|
|
||||||
// CallCustomCodec returns a CallOption that will set the given Codec to be
|
// CallCustomCodec returns a CallOption that will set the given Codec to be
|
||||||
// used for all request and response messages for a call. The result of calling
|
// used for all request and response messages for a call. The result of calling
|
||||||
// String() will be used as the content-subtype in a case-insensitive manner.
|
// String() will be used as the content-subtype in a case-insensitive manner.
|
||||||
@ -293,12 +399,22 @@ func CallContentSubtype(contentSubtype string) CallOption {
|
|||||||
// This function is provided for advanced users; prefer to use only
|
// This function is provided for advanced users; prefer to use only
|
||||||
// CallContentSubtype to select a registered codec instead.
|
// CallContentSubtype to select a registered codec instead.
|
||||||
func CallCustomCodec(codec Codec) CallOption {
|
func CallCustomCodec(codec Codec) CallOption {
|
||||||
return beforeCall(func(c *callInfo) error {
|
return CustomCodecCallOption{Codec: codec}
|
||||||
c.codec = codec
|
|
||||||
return nil
|
|
||||||
})
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// CustomCodecCallOption is a CallOption that indicates the codec used for
|
||||||
|
// marshaling messages.
|
||||||
|
// This is an EXPERIMENTAL API.
|
||||||
|
type CustomCodecCallOption struct {
|
||||||
|
Codec Codec
|
||||||
|
}
|
||||||
|
|
||||||
|
func (o CustomCodecCallOption) before(c *callInfo) error {
|
||||||
|
c.codec = o.Codec
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
func (o CustomCodecCallOption) after(c *callInfo) {}
|
||||||
|
|
||||||
// The format of the payload: compressed or not?
|
// The format of the payload: compressed or not?
|
||||||
type payloadFormat uint8
|
type payloadFormat uint8
|
||||||
|
|
||||||
@ -557,6 +673,40 @@ func setCallInfoCodec(c *callInfo) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// parseDialTarget returns the network and address to pass to dialer
|
||||||
|
func parseDialTarget(target string) (net string, addr string) {
|
||||||
|
net = "tcp"
|
||||||
|
|
||||||
|
m1 := strings.Index(target, ":")
|
||||||
|
m2 := strings.Index(target, ":/")
|
||||||
|
|
||||||
|
// handle unix:addr which will fail with url.Parse
|
||||||
|
if m1 >= 0 && m2 < 0 {
|
||||||
|
if n := target[0:m1]; n == "unix" {
|
||||||
|
net = n
|
||||||
|
addr = target[m1+1:]
|
||||||
|
return net, addr
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if m2 >= 0 {
|
||||||
|
t, err := url.Parse(target)
|
||||||
|
if err != nil {
|
||||||
|
return net, target
|
||||||
|
}
|
||||||
|
scheme := t.Scheme
|
||||||
|
addr = t.Path
|
||||||
|
if scheme == "unix" {
|
||||||
|
net = scheme
|
||||||
|
if addr == "" {
|
||||||
|
addr = t.Host
|
||||||
|
}
|
||||||
|
return net, addr
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return net, target
|
||||||
|
}
|
||||||
|
|
||||||
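To illustrate the new helper, here is a standalone sketch that mirrors parseDialTarget (a local copy for demonstration, since the vendored function is unexported), along with the splits it produces for a few representative targets.

    package main

    import (
        "fmt"
        "net/url"
        "strings"
    )

    // split mirrors the parseDialTarget logic above: unix targets are routed to
    // the "unix" network, everything else defaults to "tcp".
    func split(target string) (network, addr string) {
        network = "tcp"
        if i := strings.Index(target, ":"); i >= 0 && !strings.Contains(target, ":/") {
            if target[:i] == "unix" {
                return "unix", target[i+1:]
            }
        }
        if strings.Contains(target, ":/") {
            if u, err := url.Parse(target); err == nil && u.Scheme == "unix" {
                if u.Path != "" {
                    return "unix", u.Path
                }
                return "unix", u.Host
            }
        }
        return network, target
    }

    func main() {
        for _, t := range []string{
            "unix:///run/containerd/containerd.sock", // -> unix /run/containerd/containerd.sock
            "unix:relative.sock",                     // -> unix relative.sock
            "example.com:443",                        // -> tcp example.com:443 (unchanged)
        } {
            n, a := split(t)
            fmt.Println(t, "->", n, a)
        }
    }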
// The SupportPackageIsVersion variables are referenced from generated protocol
|
// The SupportPackageIsVersion variables are referenced from generated protocol
|
||||||
// buffer files to ensure compatibility with the gRPC version used. The latest
|
// buffer files to ensure compatibility with the gRPC version used. The latest
|
||||||
// support package version is 5.
|
// support package version is 5.
|
||||||
@ -572,6 +722,6 @@ const (
|
|||||||
)
|
)
|
||||||
|
|
||||||
// Version is the current grpc version.
|
// Version is the current grpc version.
|
||||||
const Version = "1.10.1"
|
const Version = "1.12.0"
|
||||||
|
|
||||||
const grpcUA = "grpc-go/" + Version
|
const grpcUA = "grpc-go/" + Version
|
||||||
|
219  vendor/google.golang.org/grpc/server.go  generated  vendored
@ -37,6 +37,8 @@ import (
|
|||||||
"golang.org/x/net/context"
|
"golang.org/x/net/context"
|
||||||
"golang.org/x/net/http2"
|
"golang.org/x/net/http2"
|
||||||
"golang.org/x/net/trace"
|
"golang.org/x/net/trace"
|
||||||
|
|
||||||
|
"google.golang.org/grpc/channelz"
|
||||||
"google.golang.org/grpc/codes"
|
"google.golang.org/grpc/codes"
|
||||||
"google.golang.org/grpc/credentials"
|
"google.golang.org/grpc/credentials"
|
||||||
"google.golang.org/grpc/encoding"
|
"google.golang.org/grpc/encoding"
|
||||||
@ -97,11 +99,19 @@ type Server struct {
|
|||||||
m map[string]*service // service name -> service info
|
m map[string]*service // service name -> service info
|
||||||
events trace.EventLog
|
events trace.EventLog
|
||||||
|
|
||||||
quit chan struct{}
|
quit chan struct{}
|
||||||
done chan struct{}
|
done chan struct{}
|
||||||
quitOnce sync.Once
|
quitOnce sync.Once
|
||||||
doneOnce sync.Once
|
doneOnce sync.Once
|
||||||
serveWG sync.WaitGroup // counts active Serve goroutines for GracefulStop
|
channelzRemoveOnce sync.Once
|
||||||
|
serveWG sync.WaitGroup // counts active Serve goroutines for GracefulStop
|
||||||
|
|
||||||
|
channelzID int64 // channelz unique identification number
|
||||||
|
czmu sync.RWMutex
|
||||||
|
callsStarted int64
|
||||||
|
callsFailed int64
|
||||||
|
callsSucceeded int64
|
||||||
|
lastCallStartedTime time.Time
|
||||||
}
|
}
|
||||||
|
|
||||||
type options struct {
|
type options struct {
|
||||||
@ -216,7 +226,9 @@ func RPCDecompressor(dc Decompressor) ServerOption {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// MaxMsgSize returns a ServerOption to set the max message size in bytes the server can receive.
|
// MaxMsgSize returns a ServerOption to set the max message size in bytes the server can receive.
|
||||||
// If this is not set, gRPC uses the default limit. Deprecated: use MaxRecvMsgSize instead.
|
// If this is not set, gRPC uses the default limit.
|
||||||
|
//
|
||||||
|
// Deprecated: use MaxRecvMsgSize instead.
|
||||||
func MaxMsgSize(m int) ServerOption {
|
func MaxMsgSize(m int) ServerOption {
|
||||||
return MaxRecvMsgSize(m)
|
return MaxRecvMsgSize(m)
|
||||||
}
|
}
|
||||||
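Since MaxMsgSize is now only a deprecated alias, new server code would set both limits explicitly. A minimal sketch with arbitrary 16 MiB limits and a placeholder port:

    package main

    import (
        "log"
        "net"

        "google.golang.org/grpc"
    )

    func main() {
        lis, err := net.Listen("tcp", ":50051") // placeholder port
        if err != nil {
            log.Fatal(err)
        }
        // Preferred over the deprecated MaxMsgSize: set receive and send limits directly.
        s := grpc.NewServer(
            grpc.MaxRecvMsgSize(16<<20),
            grpc.MaxSendMsgSize(16<<20),
        )
        if err := s.Serve(lis); err != nil {
            log.Fatal(err)
        }
    }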
@ -343,6 +355,10 @@ func NewServer(opt ...ServerOption) *Server {
|
|||||||
_, file, line, _ := runtime.Caller(1)
|
_, file, line, _ := runtime.Caller(1)
|
||||||
s.events = trace.NewEventLog("grpc.Server", fmt.Sprintf("%s:%d", file, line))
|
s.events = trace.NewEventLog("grpc.Server", fmt.Sprintf("%s:%d", file, line))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if channelz.IsOn() {
|
||||||
|
s.channelzID = channelz.RegisterServer(s, "")
|
||||||
|
}
|
||||||
return s
|
return s
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -458,6 +474,25 @@ func (s *Server) useTransportAuthenticator(rawConn net.Conn) (net.Conn, credenti
|
|||||||
return s.opts.creds.ServerHandshake(rawConn)
|
return s.opts.creds.ServerHandshake(rawConn)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type listenSocket struct {
|
||||||
|
net.Listener
|
||||||
|
channelzID int64
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *listenSocket) ChannelzMetric() *channelz.SocketInternalMetric {
|
||||||
|
return &channelz.SocketInternalMetric{
|
||||||
|
LocalAddr: l.Listener.Addr(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *listenSocket) Close() error {
|
||||||
|
err := l.Listener.Close()
|
||||||
|
if channelz.IsOn() {
|
||||||
|
channelz.RemoveEntry(l.channelzID)
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
// Serve accepts incoming connections on the listener lis, creating a new
|
// Serve accepts incoming connections on the listener lis, creating a new
|
||||||
// ServerTransport and service goroutine for each. The service goroutines
|
// ServerTransport and service goroutine for each. The service goroutines
|
||||||
// read gRPC requests and then call the registered handlers to reply to them.
|
// read gRPC requests and then call the registered handlers to reply to them.
|
||||||
@ -486,13 +521,19 @@ func (s *Server) Serve(lis net.Listener) error {
|
|||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
|
||||||
s.lis[lis] = true
|
ls := &listenSocket{Listener: lis}
|
||||||
|
s.lis[ls] = true
|
||||||
|
|
||||||
|
if channelz.IsOn() {
|
||||||
|
ls.channelzID = channelz.RegisterListenSocket(ls, s.channelzID, "")
|
||||||
|
}
|
||||||
s.mu.Unlock()
|
s.mu.Unlock()
|
||||||
|
|
||||||
defer func() {
|
defer func() {
|
||||||
s.mu.Lock()
|
s.mu.Lock()
|
||||||
if s.lis != nil && s.lis[lis] {
|
if s.lis != nil && s.lis[ls] {
|
||||||
lis.Close()
|
ls.Close()
|
||||||
delete(s.lis, lis)
|
delete(s.lis, ls)
|
||||||
}
|
}
|
||||||
s.mu.Unlock()
|
s.mu.Unlock()
|
||||||
}()
|
}()
|
||||||
@ -614,6 +655,7 @@ func (s *Server) newHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) tr
|
|||||||
InitialConnWindowSize: s.opts.initialConnWindowSize,
|
InitialConnWindowSize: s.opts.initialConnWindowSize,
|
||||||
WriteBufferSize: s.opts.writeBufferSize,
|
WriteBufferSize: s.opts.writeBufferSize,
|
||||||
ReadBufferSize: s.opts.readBufferSize,
|
ReadBufferSize: s.opts.readBufferSize,
|
||||||
|
ChannelzParentID: s.channelzID,
|
||||||
}
|
}
|
||||||
st, err := transport.NewServerTransport("http2", c, config)
|
st, err := transport.NewServerTransport("http2", c, config)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -624,6 +666,7 @@ func (s *Server) newHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) tr
|
|||||||
grpclog.Warningln("grpc: Server.Serve failed to create ServerTransport: ", err)
|
grpclog.Warningln("grpc: Server.Serve failed to create ServerTransport: ", err)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
return st
|
return st
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -751,6 +794,38 @@ func (s *Server) removeConn(c io.Closer) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ChannelzMetric returns ServerInternalMetric of current server.
|
||||||
|
// This is an EXPERIMENTAL API.
|
||||||
|
func (s *Server) ChannelzMetric() *channelz.ServerInternalMetric {
|
||||||
|
s.czmu.RLock()
|
||||||
|
defer s.czmu.RUnlock()
|
||||||
|
return &channelz.ServerInternalMetric{
|
||||||
|
CallsStarted: s.callsStarted,
|
||||||
|
CallsSucceeded: s.callsSucceeded,
|
||||||
|
CallsFailed: s.callsFailed,
|
||||||
|
LastCallStartedTimestamp: s.lastCallStartedTime,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Server) incrCallsStarted() {
|
||||||
|
s.czmu.Lock()
|
||||||
|
s.callsStarted++
|
||||||
|
s.lastCallStartedTime = time.Now()
|
||||||
|
s.czmu.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Server) incrCallsSucceeded() {
|
||||||
|
s.czmu.Lock()
|
||||||
|
s.callsSucceeded++
|
||||||
|
s.czmu.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Server) incrCallsFailed() {
|
||||||
|
s.czmu.Lock()
|
||||||
|
s.callsFailed++
|
||||||
|
s.czmu.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp Compressor, opts *transport.Options, comp encoding.Compressor) error {
|
func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp Compressor, opts *transport.Options, comp encoding.Compressor) error {
|
||||||
var (
|
var (
|
||||||
outPayload *stats.OutPayload
|
outPayload *stats.OutPayload
|
||||||
@ -775,15 +850,27 @@ func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Str
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, md *MethodDesc, trInfo *traceInfo) (err error) {
|
func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, md *MethodDesc, trInfo *traceInfo) (err error) {
|
||||||
|
if channelz.IsOn() {
|
||||||
|
s.incrCallsStarted()
|
||||||
|
defer func() {
|
||||||
|
if err != nil && err != io.EOF {
|
||||||
|
s.incrCallsFailed()
|
||||||
|
} else {
|
||||||
|
s.incrCallsSucceeded()
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
}
|
||||||
sh := s.opts.statsHandler
|
sh := s.opts.statsHandler
|
||||||
if sh != nil {
|
if sh != nil {
|
||||||
|
beginTime := time.Now()
|
||||||
begin := &stats.Begin{
|
begin := &stats.Begin{
|
||||||
BeginTime: time.Now(),
|
BeginTime: beginTime,
|
||||||
}
|
}
|
||||||
sh.HandleRPC(stream.Context(), begin)
|
sh.HandleRPC(stream.Context(), begin)
|
||||||
defer func() {
|
defer func() {
|
||||||
end := &stats.End{
|
end := &stats.End{
|
||||||
EndTime: time.Now(),
|
BeginTime: beginTime,
|
||||||
|
EndTime: time.Now(),
|
||||||
}
|
}
|
||||||
if err != nil && err != io.EOF {
|
if err != nil && err != io.EOF {
|
||||||
end.Error = toRPCErr(err)
|
end.Error = toRPCErr(err)
|
||||||
@ -867,6 +954,9 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
|
|||||||
}
|
}
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
if channelz.IsOn() {
|
||||||
|
t.IncrMsgRecv()
|
||||||
|
}
|
||||||
if st := checkRecvPayload(pf, stream.RecvCompress(), dc != nil || decomp != nil); st != nil {
|
if st := checkRecvPayload(pf, stream.RecvCompress(), dc != nil || decomp != nil); st != nil {
|
||||||
if e := t.WriteStatus(stream, st); e != nil {
|
if e := t.WriteStatus(stream, st); e != nil {
|
||||||
grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e)
|
grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e)
|
||||||
@ -917,12 +1007,13 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
|
|||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
reply, appErr := md.Handler(srv.server, stream.Context(), df, s.opts.unaryInt)
|
ctx := NewContextWithServerTransportStream(stream.Context(), stream)
|
||||||
|
reply, appErr := md.Handler(srv.server, ctx, df, s.opts.unaryInt)
|
||||||
if appErr != nil {
|
if appErr != nil {
|
||||||
appStatus, ok := status.FromError(appErr)
|
appStatus, ok := status.FromError(appErr)
|
||||||
if !ok {
|
if !ok {
|
||||||
// Convert appErr if it is not a grpc status error.
|
// Convert appErr if it is not a grpc status error.
|
||||||
appErr = status.Error(convertCode(appErr), appErr.Error())
|
appErr = status.Error(codes.Unknown, appErr.Error())
|
||||||
appStatus, _ = status.FromError(appErr)
|
appStatus, _ = status.FromError(appErr)
|
||||||
}
|
}
|
||||||
if trInfo != nil {
|
if trInfo != nil {
|
||||||
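Now that non-status errors from handlers are wrapped as codes.Unknown instead of being mapped through convertCode, returning a status error is the way to keep control of the code the client sees. A hedged handler sketch using the stock health-check types; the service strings are made up.

    package main

    import (
        "context"
        "errors"

        "google.golang.org/grpc/codes"
        healthpb "google.golang.org/grpc/health/grpc_health_v1"
        "google.golang.org/grpc/status"
    )

    type healthSrv struct{}

    func (healthSrv) Check(ctx context.Context, req *healthpb.HealthCheckRequest) (*healthpb.HealthCheckResponse, error) {
        if req.Service == "unknown" {
            // Preferred: an explicit status error; the client observes codes.NotFound.
            return nil, status.Errorf(codes.NotFound, "service %q is not registered", req.Service)
        }
        if req.Service == "broken" {
            // A plain error now reaches the client as codes.Unknown with this text,
            // rather than being converted by the old convertCode heuristic.
            return nil, errors.New("something went wrong")
        }
        return &healthpb.HealthCheckResponse{Status: healthpb.HealthCheckResponse_SERVING}, nil
    }

    func main() {}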
@ -965,6 +1056,9 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
|
|||||||
}
|
}
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
if channelz.IsOn() {
|
||||||
|
t.IncrMsgSent()
|
||||||
|
}
|
||||||
if trInfo != nil {
|
if trInfo != nil {
|
||||||
trInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true)
|
trInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true)
|
||||||
}
|
}
|
||||||
@ -975,15 +1069,27 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, sd *StreamDesc, trInfo *traceInfo) (err error) {
|
func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, sd *StreamDesc, trInfo *traceInfo) (err error) {
|
||||||
|
if channelz.IsOn() {
|
||||||
|
s.incrCallsStarted()
|
||||||
|
defer func() {
|
||||||
|
if err != nil && err != io.EOF {
|
||||||
|
s.incrCallsFailed()
|
||||||
|
} else {
|
||||||
|
s.incrCallsSucceeded()
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
}
|
||||||
sh := s.opts.statsHandler
|
sh := s.opts.statsHandler
|
||||||
if sh != nil {
|
if sh != nil {
|
||||||
|
beginTime := time.Now()
|
||||||
begin := &stats.Begin{
|
begin := &stats.Begin{
|
||||||
BeginTime: time.Now(),
|
BeginTime: beginTime,
|
||||||
}
|
}
|
||||||
sh.HandleRPC(stream.Context(), begin)
|
sh.HandleRPC(stream.Context(), begin)
|
||||||
defer func() {
|
defer func() {
|
||||||
end := &stats.End{
|
end := &stats.End{
|
||||||
EndTime: time.Now(),
|
BeginTime: beginTime,
|
||||||
|
EndTime: time.Now(),
|
||||||
}
|
}
|
||||||
if err != nil && err != io.EOF {
|
if err != nil && err != io.EOF {
|
||||||
end.Error = toRPCErr(err)
|
end.Error = toRPCErr(err)
|
||||||
@ -991,7 +1097,9 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
|
|||||||
sh.HandleRPC(stream.Context(), end)
|
sh.HandleRPC(stream.Context(), end)
|
||||||
}()
|
}()
|
||||||
}
|
}
|
||||||
|
ctx := NewContextWithServerTransportStream(stream.Context(), stream)
|
||||||
ss := &serverStream{
|
ss := &serverStream{
|
||||||
|
ctx: ctx,
|
||||||
t: t,
|
t: t,
|
||||||
s: stream,
|
s: stream,
|
||||||
p: &parser{r: stream},
|
p: &parser{r: stream},
|
||||||
@ -1065,7 +1173,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
|
|||||||
case transport.StreamError:
|
case transport.StreamError:
|
||||||
appStatus = status.New(err.Code, err.Desc)
|
appStatus = status.New(err.Code, err.Desc)
|
||||||
default:
|
default:
|
||||||
appStatus = status.New(convertCode(appErr), appErr.Error())
|
appStatus = status.New(codes.Unknown, appErr.Error())
|
||||||
}
|
}
|
||||||
appErr = appStatus.Err()
|
appErr = appStatus.Err()
|
||||||
}
|
}
|
||||||
@ -1085,7 +1193,6 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
|
|||||||
ss.mu.Unlock()
|
ss.mu.Unlock()
|
||||||
}
|
}
|
||||||
return t.WriteStatus(ss.s, status.New(codes.OK, ""))
|
return t.WriteStatus(ss.s, status.New(codes.OK, ""))
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream, trInfo *traceInfo) {
|
func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream, trInfo *traceInfo) {
|
||||||
@ -1167,6 +1274,42 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// The key to save ServerTransportStream in the context.
|
||||||
|
type streamKey struct{}
|
||||||
|
|
||||||
|
// NewContextWithServerTransportStream creates a new context from ctx and
|
||||||
|
// attaches stream to it.
|
||||||
|
//
|
||||||
|
// This API is EXPERIMENTAL.
|
||||||
|
func NewContextWithServerTransportStream(ctx context.Context, stream ServerTransportStream) context.Context {
|
||||||
|
return context.WithValue(ctx, streamKey{}, stream)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ServerTransportStream is a minimal interface that a transport stream must
|
||||||
|
// implement. This can be used to mock an actual transport stream for tests of
|
||||||
|
// handler code that use, for example, grpc.SetHeader (which requires some
|
||||||
|
// stream to be in context).
|
||||||
|
//
|
||||||
|
// See also NewContextWithServerTransportStream.
|
||||||
|
//
|
||||||
|
// This API is EXPERIMENTAL.
|
||||||
|
type ServerTransportStream interface {
|
||||||
|
Method() string
|
||||||
|
SetHeader(md metadata.MD) error
|
||||||
|
SendHeader(md metadata.MD) error
|
||||||
|
SetTrailer(md metadata.MD) error
|
||||||
|
}
|
||||||
|
|
||||||
|
// ServerTransportStreamFromContext returns the ServerTransportStream saved in
|
||||||
|
// ctx. Returns nil if the given context has no stream associated with it
|
||||||
|
// (which implies it is not an RPC invocation context).
|
||||||
|
//
|
||||||
|
// This API is EXPERIMENTAL.
|
||||||
|
func ServerTransportStreamFromContext(ctx context.Context) ServerTransportStream {
|
||||||
|
s, _ := ctx.Value(streamKey{}).(ServerTransportStream)
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
// Stop stops the gRPC server. It immediately closes all open
|
// Stop stops the gRPC server. It immediately closes all open
|
||||||
// connections and listeners.
|
// connections and listeners.
|
||||||
// It cancels all active RPCs on the server side and the corresponding
|
// It cancels all active RPCs on the server side and the corresponding
|
||||||
@ -1184,6 +1327,12 @@ func (s *Server) Stop() {
|
|||||||
})
|
})
|
||||||
}()
|
}()
|
||||||
|
|
||||||
|
s.channelzRemoveOnce.Do(func() {
|
||||||
|
if channelz.IsOn() {
|
||||||
|
channelz.RemoveEntry(s.channelzID)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
s.mu.Lock()
|
s.mu.Lock()
|
||||||
listeners := s.lis
|
listeners := s.lis
|
||||||
s.lis = nil
|
s.lis = nil
|
||||||
@ -1222,11 +1371,17 @@ func (s *Server) GracefulStop() {
|
|||||||
})
|
})
|
||||||
}()
|
}()
|
||||||
|
|
||||||
|
s.channelzRemoveOnce.Do(func() {
|
||||||
|
if channelz.IsOn() {
|
||||||
|
channelz.RemoveEntry(s.channelzID)
|
||||||
|
}
|
||||||
|
})
|
||||||
s.mu.Lock()
|
s.mu.Lock()
|
||||||
if s.conns == nil {
|
if s.conns == nil {
|
||||||
s.mu.Unlock()
|
s.mu.Unlock()
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
for lis := range s.lis {
|
for lis := range s.lis {
|
||||||
lis.Close()
|
lis.Close()
|
||||||
}
|
}
|
||||||
@ -1287,8 +1442,8 @@ func SetHeader(ctx context.Context, md metadata.MD) error {
|
|||||||
if md.Len() == 0 {
|
if md.Len() == 0 {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
stream, ok := transport.StreamFromContext(ctx)
|
stream := ServerTransportStreamFromContext(ctx)
|
||||||
if !ok {
|
if stream == nil {
|
||||||
return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx)
|
return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx)
|
||||||
}
|
}
|
||||||
return stream.SetHeader(md)
|
return stream.SetHeader(md)
|
||||||
@ -1297,15 +1452,11 @@ func SetHeader(ctx context.Context, md metadata.MD) error {
|
|||||||
// SendHeader sends header metadata. It may be called at most once.
|
// SendHeader sends header metadata. It may be called at most once.
|
||||||
// The provided md and headers set by SetHeader() will be sent.
|
// The provided md and headers set by SetHeader() will be sent.
|
||||||
func SendHeader(ctx context.Context, md metadata.MD) error {
|
func SendHeader(ctx context.Context, md metadata.MD) error {
|
||||||
stream, ok := transport.StreamFromContext(ctx)
|
stream := ServerTransportStreamFromContext(ctx)
|
||||||
if !ok {
|
if stream == nil {
|
||||||
return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx)
|
return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx)
|
||||||
}
|
}
|
||||||
t := stream.ServerTransport()
|
if err := stream.SendHeader(md); err != nil {
|
||||||
if t == nil {
|
|
||||||
grpclog.Fatalf("grpc: SendHeader: %v has no ServerTransport to send header metadata.", stream)
|
|
||||||
}
|
|
||||||
if err := t.WriteHeader(stream, md); err != nil {
|
|
||||||
return toRPCErr(err)
|
return toRPCErr(err)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
@ -1317,9 +1468,19 @@ func SetTrailer(ctx context.Context, md metadata.MD) error {
|
|||||||
if md.Len() == 0 {
|
if md.Len() == 0 {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
stream, ok := transport.StreamFromContext(ctx)
|
stream := ServerTransportStreamFromContext(ctx)
|
||||||
if !ok {
|
if stream == nil {
|
||||||
return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx)
|
return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx)
|
||||||
}
|
}
|
||||||
return stream.SetTrailer(md)
|
return stream.SetTrailer(md)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Method returns the method string for the server context. The returned
|
||||||
|
// string is in the format of "/service/method".
|
||||||
|
func Method(ctx context.Context) (string, bool) {
|
||||||
|
s := ServerTransportStreamFromContext(ctx)
|
||||||
|
if s == nil {
|
||||||
|
return "", false
|
||||||
|
}
|
||||||
|
return s.Method(), true
|
||||||
|
}
|
||||||
|
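Because SetHeader, SetTrailer and Method now only need a ServerTransportStream in the context, handler code can be exercised without a real transport, which is exactly the mocking use case the interface comment describes. A sketch with a hand-rolled fake stream; fakeStream, the method name and the metadata values are ours, not part of gRPC.

    package main

    import (
        "context"
        "fmt"

        "google.golang.org/grpc"
        "google.golang.org/grpc/metadata"
    )

    // fakeStream is a test double satisfying grpc.ServerTransportStream.
    type fakeStream struct {
        header, trailer metadata.MD
    }

    func (f *fakeStream) Method() string                  { return "/test.Service/Method" }
    func (f *fakeStream) SetHeader(md metadata.MD) error  { f.header = metadata.Join(f.header, md); return nil }
    func (f *fakeStream) SendHeader(md metadata.MD) error { return f.SetHeader(md) }
    func (f *fakeStream) SetTrailer(md metadata.MD) error { f.trailer = metadata.Join(f.trailer, md); return nil }

    // handlerBody is the kind of code that previously required a transport stream.
    func handlerBody(ctx context.Context) error {
        if m, ok := grpc.Method(ctx); ok {
            fmt.Println("serving", m)
        }
        return grpc.SetHeader(ctx, metadata.Pairs("x-req-id", "123"))
    }

    func main() {
        fs := &fakeStream{}
        ctx := grpc.NewContextWithServerTransportStream(context.Background(), fs)
        if err := handlerBody(ctx); err != nil {
            fmt.Println("unexpected error:", err)
        }
        fmt.Println("captured header:", fs.header)
    }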
15  vendor/google.golang.org/grpc/service_config.go  generated  vendored
@ -32,7 +32,8 @@ const maxInt = int(^uint(0) >> 1)
|
|||||||
|
|
||||||
// MethodConfig defines the configuration recommended by the service providers for a
|
// MethodConfig defines the configuration recommended by the service providers for a
|
||||||
// particular method.
|
// particular method.
|
||||||
// DEPRECATED: Users should not use this struct. Service config should be received
|
//
|
||||||
|
// Deprecated: Users should not use this struct. Service config should be received
|
||||||
// through name resolver, as specified here
|
// through name resolver, as specified here
|
||||||
// https://github.com/grpc/grpc/blob/master/doc/service_config.md
|
// https://github.com/grpc/grpc/blob/master/doc/service_config.md
|
||||||
type MethodConfig struct {
|
type MethodConfig struct {
|
||||||
@ -59,7 +60,8 @@ type MethodConfig struct {
|
|||||||
|
|
||||||
// ServiceConfig is provided by the service provider and contains parameters for how
|
// ServiceConfig is provided by the service provider and contains parameters for how
|
||||||
// clients that connect to the service should behave.
|
// clients that connect to the service should behave.
|
||||||
// DEPRECATED: Users should not use this struct. Service config should be received
|
//
|
||||||
|
// Deprecated: Users should not use this struct. Service config should be received
|
||||||
// through name resolver, as specified here
|
// through name resolver, as specified here
|
||||||
// https://github.com/grpc/grpc/blob/master/doc/service_config.md
|
// https://github.com/grpc/grpc/blob/master/doc/service_config.md
|
||||||
type ServiceConfig struct {
|
type ServiceConfig struct {
|
||||||
@ -71,6 +73,8 @@ type ServiceConfig struct {
|
|||||||
// If there's no exact match, look for the default config for the service (/service/) and use the corresponding MethodConfig if it exists.
|
// If there's no exact match, look for the default config for the service (/service/) and use the corresponding MethodConfig if it exists.
|
||||||
// Otherwise, the method has no MethodConfig to use.
|
// Otherwise, the method has no MethodConfig to use.
|
||||||
Methods map[string]MethodConfig
|
Methods map[string]MethodConfig
|
||||||
|
|
||||||
|
stickinessMetadataKey *string
|
||||||
}
|
}
|
||||||
|
|
||||||
func parseDuration(s *string) (*time.Duration, error) {
|
func parseDuration(s *string) (*time.Duration, error) {
|
||||||
@ -144,8 +148,9 @@ type jsonMC struct {
|
|||||||
|
|
||||||
// TODO(lyuxuan): delete this struct after cleaning up old service config implementation.
|
// TODO(lyuxuan): delete this struct after cleaning up old service config implementation.
|
||||||
type jsonSC struct {
|
type jsonSC struct {
|
||||||
LoadBalancingPolicy *string
|
LoadBalancingPolicy *string
|
||||||
MethodConfig *[]jsonMC
|
StickinessMetadataKey *string
|
||||||
|
MethodConfig *[]jsonMC
|
||||||
}
|
}
|
||||||
|
|
||||||
func parseServiceConfig(js string) (ServiceConfig, error) {
|
func parseServiceConfig(js string) (ServiceConfig, error) {
|
||||||
@ -158,6 +163,8 @@ func parseServiceConfig(js string) (ServiceConfig, error) {
|
|||||||
sc := ServiceConfig{
|
sc := ServiceConfig{
|
||||||
LB: rsc.LoadBalancingPolicy,
|
LB: rsc.LoadBalancingPolicy,
|
||||||
Methods: make(map[string]MethodConfig),
|
Methods: make(map[string]MethodConfig),
|
||||||
|
|
||||||
|
stickinessMetadataKey: rsc.StickinessMetadataKey,
|
||||||
}
|
}
|
||||||
if rsc.MethodConfig == nil {
|
if rsc.MethodConfig == nil {
|
||||||
return sc, nil
|
return sc, nil
|
||||||
|
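For orientation, the JSON that parseServiceConfig accepts after this change looks roughly like the following; it is normally delivered through the name resolver rather than set by hand, and the exact key casing is an assumption on our part (stickinessMetadataKey is the field added in this version).

    package main

    import "fmt"

    // Illustrative service config shape; treat the key names as an assumption.
    const exampleServiceConfig = `{
      "loadBalancingPolicy": "round_robin",
      "stickinessMetadataKey": "session-id"
    }`

    func main() { fmt.Println(exampleServiceConfig) }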
2  vendor/google.golang.org/grpc/stats/stats.go  generated  vendored
@ -169,6 +169,8 @@ func (s *OutTrailer) isRPCStats() {}
|
|||||||
type End struct {
|
type End struct {
|
||||||
// Client is true if this End is from client side.
|
// Client is true if this End is from client side.
|
||||||
Client bool
|
Client bool
|
||||||
|
// BeginTime is the time when the RPC began.
|
||||||
|
BeginTime time.Time
|
||||||
// EndTime is the time when the RPC ends.
|
// EndTime is the time when the RPC ends.
|
||||||
EndTime time.Time
|
EndTime time.Time
|
||||||
// Error is the error the RPC ended with. It is an error generated from
|
// Error is the error the RPC ended with. It is an error generated from
|
||||||
|
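The new BeginTime field lets a stats handler compute per-RPC latency from the End event alone. A minimal sketch; install it with grpc.WithStatsHandler on a client or grpc.StatsHandler on a server.

    package main

    import (
        "context"
        "log"

        "google.golang.org/grpc/stats"
    )

    // latencyHandler logs per-RPC latency using the BeginTime field added to stats.End.
    type latencyHandler struct{}

    func (latencyHandler) TagRPC(ctx context.Context, _ *stats.RPCTagInfo) context.Context   { return ctx }
    func (latencyHandler) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context { return ctx }
    func (latencyHandler) HandleConn(context.Context, stats.ConnStats)                       {}

    func (latencyHandler) HandleRPC(ctx context.Context, s stats.RPCStats) {
        if end, ok := s.(*stats.End); ok {
            log.Printf("rpc took %v (err=%v)", end.EndTime.Sub(end.BeginTime), end.Error)
        }
    }

    func main() {
        // Wire it up via grpc.WithStatsHandler(latencyHandler{}) or grpc.StatsHandler(latencyHandler{}).
        _ = latencyHandler{}
    }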
14  vendor/google.golang.org/grpc/status/status.go  generated  vendored
@ -46,7 +46,7 @@ func (se *statusError) Error() string {
|
|||||||
return fmt.Sprintf("rpc error: code = %s desc = %s", codes.Code(p.GetCode()), p.GetMessage())
|
return fmt.Sprintf("rpc error: code = %s desc = %s", codes.Code(p.GetCode()), p.GetMessage())
|
||||||
}
|
}
|
||||||
|
|
||||||
func (se *statusError) status() *Status {
|
func (se *statusError) GRPCStatus() *Status {
|
||||||
return &Status{s: (*spb.Status)(se)}
|
return &Status{s: (*spb.Status)(se)}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -120,14 +120,14 @@ func FromProto(s *spb.Status) *Status {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// FromError returns a Status representing err if it was produced from this
|
// FromError returns a Status representing err if it was produced from this
|
||||||
// package. Otherwise, ok is false and a Status is returned with codes.Unknown
|
// package or has a method `GRPCStatus() *Status`. Otherwise, ok is false and a
|
||||||
// and the original error message.
|
// Status is returned with codes.Unknown and the original error message.
|
||||||
func FromError(err error) (s *Status, ok bool) {
|
func FromError(err error) (s *Status, ok bool) {
|
||||||
if err == nil {
|
if err == nil {
|
||||||
return &Status{s: &spb.Status{Code: int32(codes.OK)}}, true
|
return &Status{s: &spb.Status{Code: int32(codes.OK)}}, true
|
||||||
}
|
}
|
||||||
if se, ok := err.(*statusError); ok {
|
if se, ok := err.(interface{ GRPCStatus() *Status }); ok {
|
||||||
return se.status(), true
|
return se.GRPCStatus(), true
|
||||||
}
|
}
|
||||||
return New(codes.Unknown, err.Error()), false
|
return New(codes.Unknown, err.Error()), false
|
||||||
}
|
}
|
||||||
@ -182,8 +182,8 @@ func Code(err error) codes.Code {
|
|||||||
if err == nil {
|
if err == nil {
|
||||||
return codes.OK
|
return codes.OK
|
||||||
}
|
}
|
||||||
if se, ok := err.(*statusError); ok {
|
if se, ok := err.(interface{ GRPCStatus() *Status }); ok {
|
||||||
return se.status().Code()
|
return se.GRPCStatus().Code()
|
||||||
}
|
}
|
||||||
return codes.Unknown
|
return codes.Unknown
|
||||||
}
|
}
|
||||||
|
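Because FromError and Code now accept any error exposing a GRPCStatus() *Status method, application error types can carry their own code without wrapping. A small sketch with an illustrative notFoundErr type:

    package main

    import (
        "fmt"

        "google.golang.org/grpc/codes"
        "google.golang.org/grpc/status"
    )

    // notFoundErr exposes a gRPC status via the GRPCStatus method that
    // status.FromError and status.Code now recognize.
    type notFoundErr struct{ name string }

    func (e *notFoundErr) Error() string { return fmt.Sprintf("%q not found", e.name) }

    func (e *notFoundErr) GRPCStatus() *status.Status {
        return status.New(codes.NotFound, e.Error())
    }

    func main() {
        var err error = &notFoundErr{name: "image"}
        fmt.Println(status.Code(err)) // NotFound
        s, ok := status.FromError(err)
        fmt.Println(ok, s.Code(), s.Message()) // true NotFound "image" not found
    }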
324  vendor/google.golang.org/grpc/stream.go  generated  vendored
@ -27,6 +27,7 @@ import (
|
|||||||
"golang.org/x/net/context"
|
"golang.org/x/net/context"
|
||||||
"golang.org/x/net/trace"
|
"golang.org/x/net/trace"
|
||||||
"google.golang.org/grpc/balancer"
|
"google.golang.org/grpc/balancer"
|
||||||
|
"google.golang.org/grpc/channelz"
|
||||||
"google.golang.org/grpc/codes"
|
"google.golang.org/grpc/codes"
|
||||||
"google.golang.org/grpc/encoding"
|
"google.golang.org/grpc/encoding"
|
||||||
"google.golang.org/grpc/metadata"
|
"google.golang.org/grpc/metadata"
|
||||||
@ -36,7 +37,10 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
// StreamHandler defines the handler called by gRPC server to complete the
|
// StreamHandler defines the handler called by gRPC server to complete the
|
||||||
// execution of a streaming RPC.
|
// execution of a streaming RPC. If a StreamHandler returns an error, it
|
||||||
|
// should be produced by the status package, or else gRPC will use
|
||||||
|
// codes.Unknown as the status code and err.Error() as the status message
|
||||||
|
// of the RPC.
|
||||||
type StreamHandler func(srv interface{}, stream ServerStream) error
|
type StreamHandler func(srv interface{}, stream ServerStream) error
|
||||||
|
|
||||||
// StreamDesc represents a streaming RPC service's method specification.
|
// StreamDesc represents a streaming RPC service's method specification.
|
||||||
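Following the expanded StreamHandler comment, a streaming handler should return status-package errors when it wants a specific code on the wire; anything else is surfaced as codes.Unknown with only err.Error() as the message. A minimal sketch:

    package main

    import (
        "google.golang.org/grpc"
        "google.golang.org/grpc/codes"
        "google.golang.org/grpc/status"
    )

    // denyAll matches grpc.StreamHandler and returns a status error so the
    // client sees codes.PermissionDenied rather than codes.Unknown.
    func denyAll(srv interface{}, stream grpc.ServerStream) error {
        return status.Error(codes.PermissionDenied, "this stream is not available")
    }

    var _ grpc.StreamHandler = denyAll

    func main() {}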
@ -99,6 +103,10 @@ type ClientStream interface {
|
|||||||
// NewStream creates a new Stream for the client side. This is typically
|
// NewStream creates a new Stream for the client side. This is typically
|
||||||
// called by generated code.
|
// called by generated code.
|
||||||
func (cc *ClientConn) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) {
|
func (cc *ClientConn) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) {
|
||||||
|
// allow interceptor to see all applicable call options, which means those
|
||||||
|
// configured as defaults from dial option as well as per-call options
|
||||||
|
opts = combine(cc.dopts.callOptions, opts)
|
||||||
|
|
||||||
if cc.dopts.streamInt != nil {
|
if cc.dopts.streamInt != nil {
|
||||||
return cc.dopts.streamInt(ctx, desc, cc, method, newClientStream, opts...)
|
return cc.dopts.streamInt(ctx, desc, cc, method, newClientStream, opts...)
|
||||||
}
|
}
|
||||||
@ -114,6 +122,14 @@ func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
|
|||||||
}
|
}
|
||||||
|
|
||||||
func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) {
|
func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) {
|
||||||
|
if channelz.IsOn() {
|
||||||
|
cc.incrCallsStarted()
|
||||||
|
defer func() {
|
||||||
|
if err != nil {
|
||||||
|
cc.incrCallsFailed()
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
}
|
||||||
c := defaultCallInfo()
|
c := defaultCallInfo()
|
||||||
mc := cc.GetMethodConfig(method)
|
mc := cc.GetMethodConfig(method)
|
||||||
if mc.WaitForReady != nil {
|
if mc.WaitForReady != nil {
|
||||||
@ -137,7 +153,6 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
|
|||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
|
||||||
opts = append(cc.dopts.callOptions, opts...)
|
|
||||||
for _, o := range opts {
|
for _, o := range opts {
|
||||||
if err := o.before(c); err != nil {
|
if err := o.before(c); err != nil {
|
||||||
return nil, toRPCErr(err)
|
return nil, toRPCErr(err)
|
||||||
@ -202,11 +217,13 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
|
|||||||
}
|
}
|
||||||
ctx = newContextWithRPCInfo(ctx, c.failFast)
|
ctx = newContextWithRPCInfo(ctx, c.failFast)
|
||||||
sh := cc.dopts.copts.StatsHandler
|
sh := cc.dopts.copts.StatsHandler
|
||||||
|
var beginTime time.Time
|
||||||
if sh != nil {
|
if sh != nil {
|
||||||
ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: c.failFast})
|
ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: c.failFast})
|
||||||
|
beginTime = time.Now()
|
||||||
begin := &stats.Begin{
|
begin := &stats.Begin{
|
||||||
Client: true,
|
Client: true,
|
||||||
BeginTime: time.Now(),
|
BeginTime: beginTime,
|
||||||
FailFast: c.failFast,
|
FailFast: c.failFast,
|
||||||
}
|
}
|
||||||
sh.HandleRPC(ctx, begin)
|
sh.HandleRPC(ctx, begin)
|
||||||
@ -214,8 +231,10 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
// Only handle end stats if err != nil.
|
// Only handle end stats if err != nil.
|
||||||
end := &stats.End{
|
end := &stats.End{
|
||||||
Client: true,
|
Client: true,
|
||||||
Error: err,
|
Error: err,
|
||||||
|
BeginTime: beginTime,
|
||||||
|
EndTime: time.Now(),
|
||||||
}
|
}
|
||||||
sh.HandleRPC(ctx, end)
|
sh.HandleRPC(ctx, end)
|
||||||
}
|
}
|
||||||
@ -259,28 +278,29 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
|
|||||||
break
|
break
|
||||||
}
|
}
|
||||||
|
|
||||||
c.stream = s
|
|
||||||
cs := &clientStream{
|
cs := &clientStream{
|
||||||
opts: opts,
|
opts: opts,
|
||||||
c: c,
|
c: c,
|
||||||
|
cc: cc,
|
||||||
desc: desc,
|
desc: desc,
|
||||||
codec: c.codec,
|
codec: c.codec,
|
||||||
cp: cp,
|
cp: cp,
|
||||||
dc: cc.dopts.dc,
|
|
||||||
comp: comp,
|
comp: comp,
|
||||||
cancel: cancel,
|
cancel: cancel,
|
||||||
|
attempt: &csAttempt{
|
||||||
done: done,
|
t: t,
|
||||||
t: t,
|
s: s,
|
||||||
s: s,
|
p: &parser{r: s},
|
||||||
p: &parser{r: s},
|
done: done,
|
||||||
|
dc: cc.dopts.dc,
|
||||||
tracing: EnableTracing,
|
ctx: ctx,
|
||||||
trInfo: trInfo,
|
trInfo: trInfo,
|
||||||
|
statsHandler: sh,
|
||||||
statsCtx: ctx,
|
beginTime: beginTime,
|
||||||
statsHandler: cc.dopts.copts.StatsHandler,
|
},
|
||||||
}
|
}
|
||||||
|
cs.c.stream = cs
|
||||||
|
cs.attempt.cs = cs
|
||||||
if desc != unaryStreamDesc {
|
if desc != unaryStreamDesc {
|
||||||
// Listen on cc and stream contexts to cleanup when the user closes the
|
// Listen on cc and stream contexts to cleanup when the user closes the
|
||||||
// ClientConn or cancels the stream context. In all other cases, an error
|
// ClientConn or cancels the stream context. In all other cases, an error
|
||||||
@ -292,7 +312,7 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
|
|||||||
case <-cc.ctx.Done():
|
case <-cc.ctx.Done():
|
||||||
cs.finish(ErrClientConnClosing)
|
cs.finish(ErrClientConnClosing)
|
||||||
case <-ctx.Done():
|
case <-ctx.Done():
|
||||||
cs.finish(toRPCErr(s.Context().Err()))
|
cs.finish(toRPCErr(ctx.Err()))
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
}
|
}
|
||||||
@ -303,46 +323,57 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
|
|||||||
type clientStream struct {
|
type clientStream struct {
|
||||||
opts []CallOption
|
opts []CallOption
|
||||||
c *callInfo
|
c *callInfo
|
||||||
|
cc *ClientConn
|
||||||
|
desc *StreamDesc
|
||||||
|
|
||||||
|
codec baseCodec
|
||||||
|
cp Compressor
|
||||||
|
comp encoding.Compressor
|
||||||
|
|
||||||
|
cancel context.CancelFunc // cancels all attempts
|
||||||
|
|
||||||
|
sentLast bool // sent an end stream
|
||||||
|
|
||||||
|
mu sync.Mutex // guards finished
|
||||||
|
finished bool // TODO: replace with atomic cmpxchg or sync.Once?
|
||||||
|
|
||||||
|
attempt *csAttempt // the active client stream attempt
|
||||||
|
// TODO(hedging): hedging will have multiple attempts simultaneously.
|
||||||
|
}
|
||||||
|
|
||||||
|
// csAttempt implements a single transport stream attempt within a
|
||||||
|
// clientStream.
|
||||||
|
type csAttempt struct {
|
||||||
|
cs *clientStream
|
||||||
t transport.ClientTransport
|
t transport.ClientTransport
|
||||||
s *transport.Stream
|
s *transport.Stream
|
||||||
p *parser
|
p *parser
|
||||||
desc *StreamDesc
|
done func(balancer.DoneInfo)
|
||||||
|
|
||||||
codec baseCodec
|
|
||||||
cp Compressor
|
|
||||||
dc Decompressor
|
dc Decompressor
|
||||||
comp encoding.Compressor
|
|
||||||
decomp encoding.Compressor
|
decomp encoding.Compressor
|
||||||
decompSet bool
|
decompSet bool
|
||||||
|
|
||||||
// cancel is only called when RecvMsg() returns non-nil error, which means
|
ctx context.Context // the application's context, wrapped by stats/tracing
|
||||||
// the stream finishes with error or with io.EOF.
|
|
||||||
cancel context.CancelFunc
|
|
||||||
|
|
||||||
tracing bool // set to EnableTracing when the clientStream is created.
|
mu sync.Mutex // guards trInfo.tr
|
||||||
|
// trInfo.tr is set when created (if EnableTracing is true),
|
||||||
mu sync.Mutex
|
// and cleared when the finish method is called.
|
||||||
done func(balancer.DoneInfo)
|
|
||||||
sentLast bool // sent an end stream
|
|
||||||
finished bool
|
|
||||||
// trInfo.tr is set when the clientStream is created (if EnableTracing is true),
|
|
||||||
// and is set to nil when the clientStream's finish method is called.
|
|
||||||
trInfo traceInfo
|
trInfo traceInfo
|
||||||
|
|
||||||
// statsCtx keeps the user context for stats handling.
|
|
||||||
// All stats collection should use the statsCtx (instead of the stream context)
|
|
||||||
// so that all the generated stats for a particular RPC can be associated in the processing phase.
|
|
||||||
statsCtx context.Context
|
|
||||||
statsHandler stats.Handler
|
statsHandler stats.Handler
|
||||||
|
beginTime time.Time
|
||||||
}
|
}
|
||||||
|
|
||||||
func (cs *clientStream) Context() context.Context {
|
func (cs *clientStream) Context() context.Context {
|
||||||
return cs.s.Context()
|
// TODO(retry): commit the current attempt (the context has peer-aware data).
|
||||||
|
return cs.attempt.context()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (cs *clientStream) Header() (metadata.MD, error) {
|
func (cs *clientStream) Header() (metadata.MD, error) {
|
||||||
m, err := cs.s.Header()
|
m, err := cs.attempt.header()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
// TODO(retry): maybe retry on error or commit attempt on success.
|
||||||
err = toRPCErr(err)
|
err = toRPCErr(err)
|
||||||
cs.finish(err)
|
cs.finish(err)
|
||||||
}
|
}
|
||||||
@ -350,20 +381,68 @@ func (cs *clientStream) Header() (metadata.MD, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (cs *clientStream) Trailer() metadata.MD {
|
func (cs *clientStream) Trailer() metadata.MD {
|
||||||
return cs.s.Trailer()
|
// TODO(retry): on error, maybe retry (trailers-only).
|
||||||
|
return cs.attempt.trailer()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (cs *clientStream) SendMsg(m interface{}) (err error) {
|
func (cs *clientStream) SendMsg(m interface{}) (err error) {
|
||||||
// TODO: Check cs.sentLast and error if we already ended the stream.
|
// TODO(retry): buffer message for replaying if not committed.
|
||||||
if cs.tracing {
|
return cs.attempt.sendMsg(m)
|
||||||
cs.mu.Lock()
|
}
|
||||||
if cs.trInfo.tr != nil {
|
|
||||||
cs.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true)
|
func (cs *clientStream) RecvMsg(m interface{}) (err error) {
|
||||||
}
|
// TODO(retry): maybe retry on error or commit attempt on success.
|
||||||
cs.mu.Unlock()
|
return cs.attempt.recvMsg(m)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cs *clientStream) CloseSend() error {
|
||||||
|
cs.attempt.closeSend()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cs *clientStream) finish(err error) {
|
||||||
|
if err == io.EOF {
|
||||||
|
// Ending a stream with EOF indicates a success.
|
||||||
|
err = nil
|
||||||
}
|
}
|
||||||
|
cs.mu.Lock()
|
||||||
|
if cs.finished {
|
||||||
|
cs.mu.Unlock()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
cs.finished = true
|
||||||
|
cs.mu.Unlock()
|
||||||
|
if channelz.IsOn() {
|
||||||
|
if err != nil {
|
||||||
|
cs.cc.incrCallsFailed()
|
||||||
|
} else {
|
||||||
|
cs.cc.incrCallsSucceeded()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// TODO(retry): commit current attempt if necessary.
|
||||||
|
cs.attempt.finish(err)
|
||||||
|
for _, o := range cs.opts {
|
||||||
|
o.after(cs.c)
|
||||||
|
}
|
||||||
|
cs.cancel()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *csAttempt) context() context.Context {
|
||||||
|
return a.s.Context()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *csAttempt) header() (metadata.MD, error) {
|
||||||
|
return a.s.Header()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *csAttempt) trailer() metadata.MD {
|
||||||
|
return a.s.Trailer()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *csAttempt) sendMsg(m interface{}) (err error) {
|
||||||
// TODO Investigate how to signal the stats handling party.
|
// TODO Investigate how to signal the stats handling party.
|
||||||
// generate error stats if err != nil && err != io.EOF?
|
// generate error stats if err != nil && err != io.EOF?
|
||||||
|
cs := a.cs
|
||||||
defer func() {
|
defer func() {
|
||||||
// For non-client-streaming RPCs, we return nil instead of EOF on success
|
// For non-client-streaming RPCs, we return nil instead of EOF on success
|
||||||
// because the generated code requires it. finish is not called; RecvMsg()
|
// because the generated code requires it. finish is not called; RecvMsg()
|
||||||
@ -372,14 +451,23 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) {
|
|||||||
err = nil
|
err = nil
|
||||||
}
|
}
|
||||||
if err != nil && err != io.EOF {
|
if err != nil && err != io.EOF {
|
||||||
// Call finish for errors generated by this SendMsg call. (Transport
|
// Call finish on the client stream for errors generated by this SendMsg
|
||||||
|
// call, as these indicate problems created by this client. (Transport
|
||||||
// errors are converted to an io.EOF error below; the real error will be
|
// errors are converted to an io.EOF error below; the real error will be
|
||||||
// returned from RecvMsg eventually in that case.)
|
// returned from RecvMsg eventually in that case, or be retried.)
|
||||||
cs.finish(err)
|
cs.finish(err)
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
// TODO: Check cs.sentLast and error if we already ended the stream.
|
||||||
|
if EnableTracing {
|
||||||
|
a.mu.Lock()
|
||||||
|
if a.trInfo.tr != nil {
|
||||||
|
a.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true)
|
||||||
|
}
|
||||||
|
a.mu.Unlock()
|
||||||
|
}
|
||||||
var outPayload *stats.OutPayload
|
var outPayload *stats.OutPayload
|
||||||
if cs.statsHandler != nil {
|
if a.statsHandler != nil {
|
||||||
outPayload = &stats.OutPayload{
|
outPayload = &stats.OutPayload{
|
||||||
Client: true,
|
Client: true,
|
||||||
}
|
}
|
||||||
@ -394,18 +482,22 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) {
|
|||||||
if !cs.desc.ClientStreams {
|
if !cs.desc.ClientStreams {
|
||||||
cs.sentLast = true
|
cs.sentLast = true
|
||||||
}
|
}
|
||||||
err = cs.t.Write(cs.s, hdr, data, &transport.Options{Last: !cs.desc.ClientStreams})
|
err = a.t.Write(a.s, hdr, data, &transport.Options{Last: !cs.desc.ClientStreams})
|
||||||
if err == nil {
|
if err == nil {
|
||||||
if outPayload != nil {
|
if outPayload != nil {
|
||||||
outPayload.SentTime = time.Now()
|
outPayload.SentTime = time.Now()
|
||||||
cs.statsHandler.HandleRPC(cs.statsCtx, outPayload)
|
a.statsHandler.HandleRPC(a.ctx, outPayload)
|
||||||
|
}
|
||||||
|
if channelz.IsOn() {
|
||||||
|
a.t.IncrMsgSent()
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
return io.EOF
|
return io.EOF
|
||||||
}
|
}
|
||||||
|
|
||||||
func (cs *clientStream) RecvMsg(m interface{}) (err error) {
|
func (a *csAttempt) recvMsg(m interface{}) (err error) {
|
||||||
|
cs := a.cs
|
||||||
defer func() {
|
defer func() {
|
||||||
if err != nil || !cs.desc.ServerStreams {
|
if err != nil || !cs.desc.ServerStreams {
|
||||||
// err != nil or non-server-streaming indicates end of stream.
|
// err != nil or non-server-streaming indicates end of stream.
|
||||||
@ -413,46 +505,49 @@ func (cs *clientStream) RecvMsg(m interface{}) (err error) {
|
|||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
var inPayload *stats.InPayload
|
var inPayload *stats.InPayload
|
||||||
if cs.statsHandler != nil {
|
if a.statsHandler != nil {
|
||||||
inPayload = &stats.InPayload{
|
inPayload = &stats.InPayload{
|
||||||
Client: true,
|
Client: true,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if !cs.decompSet {
|
if !a.decompSet {
|
||||||
// Block until we receive headers containing received message encoding.
|
// Block until we receive headers containing received message encoding.
|
||||||
if ct := cs.s.RecvCompress(); ct != "" && ct != encoding.Identity {
|
if ct := a.s.RecvCompress(); ct != "" && ct != encoding.Identity {
|
||||||
if cs.dc == nil || cs.dc.Type() != ct {
|
if a.dc == nil || a.dc.Type() != ct {
|
||||||
// No configured decompressor, or it does not match the incoming
|
// No configured decompressor, or it does not match the incoming
|
||||||
// message encoding; attempt to find a registered compressor that does.
|
// message encoding; attempt to find a registered compressor that does.
|
||||||
cs.dc = nil
|
a.dc = nil
|
||||||
cs.decomp = encoding.GetCompressor(ct)
|
a.decomp = encoding.GetCompressor(ct)
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
// No compression is used; disable our decompressor.
|
// No compression is used; disable our decompressor.
|
||||||
cs.dc = nil
|
a.dc = nil
|
||||||
}
|
}
|
||||||
// Only initialize this state once per stream.
|
// Only initialize this state once per stream.
|
||||||
cs.decompSet = true
|
a.decompSet = true
|
||||||
}
|
}
|
||||||
err = recv(cs.p, cs.codec, cs.s, cs.dc, m, *cs.c.maxReceiveMessageSize, inPayload, cs.decomp)
|
err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.c.maxReceiveMessageSize, inPayload, a.decomp)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if err == io.EOF {
|
if err == io.EOF {
|
||||||
if statusErr := cs.s.Status().Err(); statusErr != nil {
|
if statusErr := a.s.Status().Err(); statusErr != nil {
|
||||||
return statusErr
|
return statusErr
|
||||||
}
|
}
|
||||||
return io.EOF // indicates successful end of stream.
|
return io.EOF // indicates successful end of stream.
|
||||||
}
|
}
|
||||||
return toRPCErr(err)
|
return toRPCErr(err)
|
||||||
}
|
}
|
||||||
if cs.tracing {
|
if EnableTracing {
|
||||||
cs.mu.Lock()
|
a.mu.Lock()
|
||||||
if cs.trInfo.tr != nil {
|
if a.trInfo.tr != nil {
|
||||||
cs.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true)
|
a.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true)
|
||||||
}
|
}
|
||||||
cs.mu.Unlock()
|
a.mu.Unlock()
|
||||||
}
|
}
|
||||||
if inPayload != nil {
|
if inPayload != nil {
|
||||||
cs.statsHandler.HandleRPC(cs.statsCtx, inPayload)
|
a.statsHandler.HandleRPC(a.ctx, inPayload)
|
||||||
|
}
|
||||||
|
if channelz.IsOn() {
|
||||||
|
a.t.IncrMsgRecv()
|
||||||
}
|
}
|
||||||
if cs.desc.ServerStreams {
|
if cs.desc.ServerStreams {
|
||||||
// Subsequent messages should be received by subsequent RecvMsg calls.
|
// Subsequent messages should be received by subsequent RecvMsg calls.
|
||||||
@ -461,74 +556,59 @@ func (cs *clientStream) RecvMsg(m interface{}) (err error) {
|
|||||||
|
|
||||||
// Special handling for non-server-stream rpcs.
|
// Special handling for non-server-stream rpcs.
|
||||||
// This recv expects EOF or errors, so we don't collect inPayload.
|
// This recv expects EOF or errors, so we don't collect inPayload.
|
||||||
err = recv(cs.p, cs.codec, cs.s, cs.dc, m, *cs.c.maxReceiveMessageSize, nil, cs.decomp)
|
err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.c.maxReceiveMessageSize, nil, a.decomp)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
|
return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
|
||||||
}
|
}
|
||||||
if err == io.EOF {
|
if err == io.EOF {
|
||||||
return cs.s.Status().Err() // non-server streaming Recv returns nil on success
|
return a.s.Status().Err() // non-server streaming Recv returns nil on success
|
||||||
}
|
}
|
||||||
return toRPCErr(err)
|
return toRPCErr(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (cs *clientStream) CloseSend() error {
|
func (a *csAttempt) closeSend() {
|
||||||
|
cs := a.cs
|
||||||
if cs.sentLast {
|
if cs.sentLast {
|
||||||
return nil
|
return
|
||||||
}
|
}
|
||||||
cs.sentLast = true
|
cs.sentLast = true
|
||||||
cs.t.Write(cs.s, nil, nil, &transport.Options{Last: true})
|
cs.attempt.t.Write(cs.attempt.s, nil, nil, &transport.Options{Last: true})
|
||||||
// We ignore errors from Write and always return nil here. Any error it
|
// We ignore errors from Write. Any error it would return would also be
|
||||||
// would return would also be returned by a subsequent RecvMsg call, and the
|
// returned by a subsequent RecvMsg call, and the user is supposed to always
|
||||||
// user is supposed to always finish the stream by calling RecvMsg until it
|
// finish the stream by calling RecvMsg until it returns err != nil.
|
||||||
// returns err != nil.
|
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (cs *clientStream) finish(err error) {
|
func (a *csAttempt) finish(err error) {
|
||||||
if err == io.EOF {
|
a.mu.Lock()
|
||||||
// Ending a stream with EOF indicates a success.
|
a.t.CloseStream(a.s, err)
|
||||||
err = nil
|
|
||||||
}
|
if a.done != nil {
|
||||||
cs.mu.Lock()
|
a.done(balancer.DoneInfo{
|
||||||
defer cs.mu.Unlock()
|
|
||||||
if cs.finished {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
cs.finished = true
|
|
||||||
cs.t.CloseStream(cs.s, err)
|
|
||||||
for _, o := range cs.opts {
|
|
||||||
o.after(cs.c)
|
|
||||||
}
|
|
||||||
if cs.done != nil {
|
|
||||||
cs.done(balancer.DoneInfo{
|
|
||||||
Err: err,
|
Err: err,
|
||||||
BytesSent: true,
|
BytesSent: true,
|
||||||
BytesReceived: cs.s.BytesReceived(),
|
BytesReceived: a.s.BytesReceived(),
|
||||||
})
|
})
|
||||||
cs.done = nil
|
|
||||||
}
|
}
|
||||||
if cs.statsHandler != nil {
|
if a.statsHandler != nil {
|
||||||
end := &stats.End{
|
end := &stats.End{
|
||||||
Client: true,
|
Client: true,
|
||||||
EndTime: time.Now(),
|
BeginTime: a.beginTime,
|
||||||
Error: err,
|
EndTime: time.Now(),
|
||||||
|
Error: err,
|
||||||
}
|
}
|
||||||
cs.statsHandler.HandleRPC(cs.statsCtx, end)
|
a.statsHandler.HandleRPC(a.ctx, end)
|
||||||
}
|
}
|
||||||
cs.cancel()
|
if a.trInfo.tr != nil {
|
||||||
if !cs.tracing {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if cs.trInfo.tr != nil {
|
|
||||||
if err == nil {
|
if err == nil {
|
||||||
cs.trInfo.tr.LazyPrintf("RPC: [OK]")
|
a.trInfo.tr.LazyPrintf("RPC: [OK]")
|
||||||
} else {
|
} else {
|
||||||
cs.trInfo.tr.LazyPrintf("RPC: [%v]", err)
|
a.trInfo.tr.LazyPrintf("RPC: [%v]", err)
|
||||||
cs.trInfo.tr.SetError()
|
a.trInfo.tr.SetError()
|
||||||
}
|
}
|
||||||
cs.trInfo.tr.Finish()
|
a.trInfo.tr.Finish()
|
||||||
cs.trInfo.tr = nil
|
a.trInfo.tr = nil
|
||||||
}
|
}
|
||||||
|
a.mu.Unlock()
|
||||||
}
|
}
|
||||||
|
|
||||||
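Note on the block above: the refactor moves per-attempt teardown into csAttempt.finish, and the stats.End event it emits now carries BeginTime next to EndTime, so a client-side stats handler can compute attempt latency without keeping its own clock. The following is a minimal sketch of such a handler, not part of this commit; the type name latencyHandler and the log format are invented for illustration, only the google.golang.org/grpc/stats types and fields come from the code shown above.

    package statsdemo

    import (
    	"context"
    	"log"

    	"google.golang.org/grpc/stats"
    )

    // latencyHandler is a hypothetical stats.Handler that logs per-RPC latency
    // from the BeginTime/EndTime fields populated by csAttempt.finish above.
    type latencyHandler struct{}

    func (latencyHandler) TagRPC(ctx context.Context, _ *stats.RPCTagInfo) context.Context { return ctx }

    func (latencyHandler) HandleRPC(_ context.Context, s stats.RPCStats) {
    	// Only the End event carries both timestamps and the final error.
    	if end, ok := s.(*stats.End); ok && end.Client {
    		log.Printf("rpc took %v (err=%v)", end.EndTime.Sub(end.BeginTime), end.Error)
    	}
    }

    func (latencyHandler) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context { return ctx }

    func (latencyHandler) HandleConn(context.Context, stats.ConnStats) {}

Such a handler would typically be installed through the stats-handler dial or server options; whether that fits a given setup is outside the scope of this diff.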
// ServerStream defines the interface a server stream has to satisfy.
|
// ServerStream defines the interface a server stream has to satisfy.
|
||||||
@ -552,6 +632,7 @@ type ServerStream interface {
|
|||||||
|
|
||||||
// serverStream implements a server side Stream.
|
// serverStream implements a server side Stream.
|
||||||
type serverStream struct {
|
type serverStream struct {
|
||||||
|
ctx context.Context
|
||||||
t transport.ServerTransport
|
t transport.ServerTransport
|
||||||
s *transport.Stream
|
s *transport.Stream
|
||||||
p *parser
|
p *parser
|
||||||
@ -572,7 +653,7 @@ type serverStream struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (ss *serverStream) Context() context.Context {
|
func (ss *serverStream) Context() context.Context {
|
||||||
return ss.s.Context()
|
return ss.ctx
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ss *serverStream) SetHeader(md metadata.MD) error {
|
func (ss *serverStream) SetHeader(md metadata.MD) error {
|
||||||
@ -591,7 +672,6 @@ func (ss *serverStream) SetTrailer(md metadata.MD) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
ss.s.SetTrailer(md)
|
ss.s.SetTrailer(md)
|
||||||
return
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ss *serverStream) SendMsg(m interface{}) (err error) {
|
func (ss *serverStream) SendMsg(m interface{}) (err error) {
|
||||||
@ -612,6 +692,9 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) {
|
|||||||
st, _ := status.FromError(toRPCErr(err))
|
st, _ := status.FromError(toRPCErr(err))
|
||||||
ss.t.WriteStatus(ss.s, st)
|
ss.t.WriteStatus(ss.s, st)
|
||||||
}
|
}
|
||||||
|
if channelz.IsOn() && err == nil {
|
||||||
|
ss.t.IncrMsgSent()
|
||||||
|
}
|
||||||
}()
|
}()
|
||||||
var outPayload *stats.OutPayload
|
var outPayload *stats.OutPayload
|
||||||
if ss.statsHandler != nil {
|
if ss.statsHandler != nil {
|
||||||
@ -652,6 +735,9 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) {
|
|||||||
st, _ := status.FromError(toRPCErr(err))
|
st, _ := status.FromError(toRPCErr(err))
|
||||||
ss.t.WriteStatus(ss.s, st)
|
ss.t.WriteStatus(ss.s, st)
|
||||||
}
|
}
|
||||||
|
if channelz.IsOn() && err == nil {
|
||||||
|
ss.t.IncrMsgRecv()
|
||||||
|
}
|
||||||
}()
|
}()
|
||||||
var inPayload *stats.InPayload
|
var inPayload *stats.InPayload
|
||||||
if ss.statsHandler != nil {
|
if ss.statsHandler != nil {
|
||||||
@ -675,9 +761,5 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) {
|
|||||||
// MethodFromServerStream returns the method string for the input stream.
|
// MethodFromServerStream returns the method string for the input stream.
|
||||||
// The returned string is in the format of "/service/method".
|
// The returned string is in the format of "/service/method".
|
||||||
func MethodFromServerStream(stream ServerStream) (string, bool) {
|
func MethodFromServerStream(stream ServerStream) (string, bool) {
|
||||||
s, ok := transport.StreamFromContext(stream.Context())
|
return Method(stream.Context())
|
||||||
if !ok {
|
|
||||||
return "", ok
|
|
||||||
}
|
|
||||||
return s.Method(), ok
|
|
||||||
}
|
}
|
||||||
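MethodFromServerStream now delegates to Method on the stream context instead of fishing the transport stream back out of the context. As a rough usage sketch (not taken from this commit), a stream interceptor can use it to tag log lines with the "/service/method" name; logStreams and the package name are made up, the grpc identifiers are the exported API.

    package interceptors

    import (
    	"log"

    	"google.golang.org/grpc"
    )

    // logStreams is an illustrative grpc.StreamServerInterceptor that uses
    // MethodFromServerStream to name the streaming call being handled.
    func logStreams(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
    	if m, ok := grpc.MethodFromServerStream(ss); ok {
    		log.Printf("streaming call %s started", m)
    	}
    	return handler(srv, ss)
    }

It would be registered with grpc.StreamInterceptor(logStreams) when constructing the server; that wiring is an assumption of the sketch, not something this patch adds.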
|
769 vendor/google.golang.org/grpc/transport/controlbuf.go (generated, vendored, new file)
@ -0,0 +1,769 @@
|
|||||||
|
/*
|
||||||
|
*
|
||||||
|
* Copyright 2014 gRPC authors.
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
|
||||||
|
package transport
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"runtime"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"golang.org/x/net/http2"
|
||||||
|
"golang.org/x/net/http2/hpack"
|
||||||
|
)
|
||||||
|
|
||||||
|
type itemNode struct {
|
||||||
|
it interface{}
|
||||||
|
next *itemNode
|
||||||
|
}
|
||||||
|
|
||||||
|
type itemList struct {
|
||||||
|
head *itemNode
|
||||||
|
tail *itemNode
|
||||||
|
}
|
||||||
|
|
||||||
|
func (il *itemList) enqueue(i interface{}) {
|
||||||
|
n := &itemNode{it: i}
|
||||||
|
if il.tail == nil {
|
||||||
|
il.head, il.tail = n, n
|
||||||
|
return
|
||||||
|
}
|
||||||
|
il.tail.next = n
|
||||||
|
il.tail = n
|
||||||
|
}
|
||||||
|
|
||||||
|
// peek returns the first item in the list without removing it from the
|
||||||
|
// list.
|
||||||
|
func (il *itemList) peek() interface{} {
|
||||||
|
return il.head.it
|
||||||
|
}
|
||||||
|
|
||||||
|
func (il *itemList) dequeue() interface{} {
|
||||||
|
if il.head == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
i := il.head.it
|
||||||
|
il.head = il.head.next
|
||||||
|
if il.head == nil {
|
||||||
|
il.tail = nil
|
||||||
|
}
|
||||||
|
return i
|
||||||
|
}
|
||||||
|
|
||||||
|
func (il *itemList) dequeueAll() *itemNode {
|
||||||
|
h := il.head
|
||||||
|
il.head, il.tail = nil, nil
|
||||||
|
return h
|
||||||
|
}
|
||||||
|
|
||||||
|
func (il *itemList) isEmpty() bool {
|
||||||
|
return il.head == nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// The following defines various control items which could flow through
|
||||||
|
// the control buffer of transport. They represent different aspects of
|
||||||
|
// control tasks, e.g., flow control, settings, streaming resetting, etc.
|
||||||
|
|
||||||
|
type headerFrame struct {
|
||||||
|
streamID uint32
|
||||||
|
hf []hpack.HeaderField
|
||||||
|
endStream bool // Valid on server side.
|
||||||
|
initStream func(uint32) (bool, error) // Used only on the client side.
|
||||||
|
onWrite func()
|
||||||
|
wq *writeQuota // write quota for the stream created.
|
||||||
|
cleanup *cleanupStream // Valid on the server side.
|
||||||
|
onOrphaned func(error) // Valid on client-side
|
||||||
|
}
|
||||||
|
|
||||||
|
type cleanupStream struct {
|
||||||
|
streamID uint32
|
||||||
|
idPtr *uint32
|
||||||
|
rst bool
|
||||||
|
rstCode http2.ErrCode
|
||||||
|
onWrite func()
|
||||||
|
}
|
||||||
|
|
||||||
|
type dataFrame struct {
|
||||||
|
streamID uint32
|
||||||
|
endStream bool
|
||||||
|
h []byte
|
||||||
|
d []byte
|
||||||
|
// onEachWrite is called every time
|
||||||
|
// a part of d is written out.
|
||||||
|
onEachWrite func()
|
||||||
|
}
|
||||||
|
|
||||||
|
type incomingWindowUpdate struct {
|
||||||
|
streamID uint32
|
||||||
|
increment uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
type outgoingWindowUpdate struct {
|
||||||
|
streamID uint32
|
||||||
|
increment uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
type incomingSettings struct {
|
||||||
|
ss []http2.Setting
|
||||||
|
}
|
||||||
|
|
||||||
|
type outgoingSettings struct {
|
||||||
|
ss []http2.Setting
|
||||||
|
}
|
||||||
|
|
||||||
|
type settingsAck struct {
|
||||||
|
}
|
||||||
|
|
||||||
|
type incomingGoAway struct {
|
||||||
|
}
|
||||||
|
|
||||||
|
type goAway struct {
|
||||||
|
code http2.ErrCode
|
||||||
|
debugData []byte
|
||||||
|
headsUp bool
|
||||||
|
closeConn bool
|
||||||
|
}
|
||||||
|
|
||||||
|
type ping struct {
|
||||||
|
ack bool
|
||||||
|
data [8]byte
|
||||||
|
}
|
||||||
|
|
||||||
|
type outFlowControlSizeRequest struct {
|
||||||
|
resp chan uint32
|
||||||
|
}
|
||||||
|
|
||||||
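The structs above are the control items: everything the rest of the transport wants written on the wire (headers, data, pings, settings, window updates, GOAWAY) is wrapped in one of them and handed to the controlBuffer defined later in this file, so only the loopy writer ever touches the framer. A simplified illustration of enqueuing one such item, written as if inside this package; bumpConnWindow is an invented helper, the item type and put method are the ones shown in this file.

    // Sketch only: ask the writer goroutine to send a connection-level
    // WINDOW_UPDATE. In the real transport this happens inside the
    // http2Client/http2Server flow-control paths, not via a helper like this.
    func bumpConnWindow(cb *controlBuffer, delta uint32) error {
    	return cb.put(&outgoingWindowUpdate{
    		streamID:  0, // stream 0 addresses the whole connection
    		increment: delta,
    	})
    }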
|
type outStreamState int
|
||||||
|
|
||||||
|
const (
|
||||||
|
active outStreamState = iota
|
||||||
|
empty
|
||||||
|
waitingOnStreamQuota
|
||||||
|
)
|
||||||
|
|
||||||
|
type outStream struct {
|
||||||
|
id uint32
|
||||||
|
state outStreamState
|
||||||
|
itl *itemList
|
||||||
|
bytesOutStanding int
|
||||||
|
wq *writeQuota
|
||||||
|
|
||||||
|
next *outStream
|
||||||
|
prev *outStream
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *outStream) deleteSelf() {
|
||||||
|
if s.prev != nil {
|
||||||
|
s.prev.next = s.next
|
||||||
|
}
|
||||||
|
if s.next != nil {
|
||||||
|
s.next.prev = s.prev
|
||||||
|
}
|
||||||
|
s.next, s.prev = nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type outStreamList struct {
|
||||||
|
// Following are sentinel objects that mark the
|
||||||
|
// beginning and end of the list. They do not
|
||||||
|
// contain any item lists. All valid objects are
|
||||||
|
// inserted in between them.
|
||||||
|
// This is needed so that an outStream object can
|
||||||
|
// deleteSelf() in O(1) time without knowing which
|
||||||
|
// list it belongs to.
|
||||||
|
head *outStream
|
||||||
|
tail *outStream
|
||||||
|
}
|
||||||
|
|
||||||
|
func newOutStreamList() *outStreamList {
|
||||||
|
head, tail := new(outStream), new(outStream)
|
||||||
|
head.next = tail
|
||||||
|
tail.prev = head
|
||||||
|
return &outStreamList{
|
||||||
|
head: head,
|
||||||
|
tail: tail,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *outStreamList) enqueue(s *outStream) {
|
||||||
|
e := l.tail.prev
|
||||||
|
e.next = s
|
||||||
|
s.prev = e
|
||||||
|
s.next = l.tail
|
||||||
|
l.tail.prev = s
|
||||||
|
}
|
||||||
|
|
||||||
|
// remove from the beginning of the list.
|
||||||
|
func (l *outStreamList) dequeue() *outStream {
|
||||||
|
b := l.head.next
|
||||||
|
if b == l.tail {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
b.deleteSelf()
|
||||||
|
return b
|
||||||
|
}
|
||||||
|
|
||||||
|
type controlBuffer struct {
|
||||||
|
ch chan struct{}
|
||||||
|
done <-chan struct{}
|
||||||
|
mu sync.Mutex
|
||||||
|
consumerWaiting bool
|
||||||
|
list *itemList
|
||||||
|
err error
|
||||||
|
}
|
||||||
|
|
||||||
|
func newControlBuffer(done <-chan struct{}) *controlBuffer {
|
||||||
|
return &controlBuffer{
|
||||||
|
ch: make(chan struct{}, 1),
|
||||||
|
list: &itemList{},
|
||||||
|
done: done,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *controlBuffer) put(it interface{}) error {
|
||||||
|
_, err := c.executeAndPut(nil, it)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it interface{}) (bool, error) {
|
||||||
|
var wakeUp bool
|
||||||
|
c.mu.Lock()
|
||||||
|
if c.err != nil {
|
||||||
|
c.mu.Unlock()
|
||||||
|
return false, c.err
|
||||||
|
}
|
||||||
|
if f != nil {
|
||||||
|
if !f(it) { // f wasn't successful
|
||||||
|
c.mu.Unlock()
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if c.consumerWaiting {
|
||||||
|
wakeUp = true
|
||||||
|
c.consumerWaiting = false
|
||||||
|
}
|
||||||
|
c.list.enqueue(it)
|
||||||
|
c.mu.Unlock()
|
||||||
|
if wakeUp {
|
||||||
|
select {
|
||||||
|
case c.ch <- struct{}{}:
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *controlBuffer) get(block bool) (interface{}, error) {
|
||||||
|
for {
|
||||||
|
c.mu.Lock()
|
||||||
|
if c.err != nil {
|
||||||
|
c.mu.Unlock()
|
||||||
|
return nil, c.err
|
||||||
|
}
|
||||||
|
if !c.list.isEmpty() {
|
||||||
|
h := c.list.dequeue()
|
||||||
|
c.mu.Unlock()
|
||||||
|
return h, nil
|
||||||
|
}
|
||||||
|
if !block {
|
||||||
|
c.mu.Unlock()
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
c.consumerWaiting = true
|
||||||
|
c.mu.Unlock()
|
||||||
|
select {
|
||||||
|
case <-c.ch:
|
||||||
|
case <-c.done:
|
||||||
|
c.finish()
|
||||||
|
return nil, ErrConnClosing
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *controlBuffer) finish() {
|
||||||
|
c.mu.Lock()
|
||||||
|
if c.err != nil {
|
||||||
|
c.mu.Unlock()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
c.err = ErrConnClosing
|
||||||
|
// There may be headers for streams in the control buffer.
|
||||||
|
// These streams need to be cleaned out since the transport
|
||||||
|
// is still not aware of these yet.
|
||||||
|
for head := c.list.dequeueAll(); head != nil; head = head.next {
|
||||||
|
hdr, ok := head.it.(*headerFrame)
|
||||||
|
if !ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if hdr.onOrphaned != nil { // It will be nil on the server-side.
|
||||||
|
hdr.onOrphaned(ErrConnClosing)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
c.mu.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
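controlBuffer above is a mutex-guarded list plus a one-slot channel: producers signal only when a consumer has declared itself waiting, and the consumer re-checks the list before parking, so no wake-up is lost and the channel never needs more than one pending token. A stripped-down version of the same handoff, generic and not this exact code (notifyQueue is an invented name, and the done/error handling of the real type is omitted):

    package handoff

    import "sync"

    // notifyQueue demonstrates the wake-up discipline used by controlBuffer.
    type notifyQueue struct {
    	mu      sync.Mutex
    	items   []interface{}
    	waiting bool
    	ch      chan struct{} // capacity 1
    }

    func newNotifyQueue() *notifyQueue {
    	return &notifyQueue{ch: make(chan struct{}, 1)}
    }

    func (q *notifyQueue) put(it interface{}) {
    	q.mu.Lock()
    	wake := q.waiting
    	q.waiting = false
    	q.items = append(q.items, it)
    	q.mu.Unlock()
    	if wake {
    		select { // non-blocking: at most one token is ever outstanding
    		case q.ch <- struct{}{}:
    		default:
    		}
    	}
    }

    func (q *notifyQueue) get() interface{} {
    	for {
    		q.mu.Lock()
    		if len(q.items) > 0 {
    			it := q.items[0]
    			q.items = q.items[1:]
    			q.mu.Unlock()
    			return it
    		}
    		q.waiting = true // declare intent before parking
    		q.mu.Unlock()
    		<-q.ch // park until a producer signals
    	}
    }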
|
type side int
|
||||||
|
|
||||||
|
const (
|
||||||
|
clientSide side = iota
|
||||||
|
serverSide
|
||||||
|
)
|
||||||
|
|
||||||
|
type loopyWriter struct {
|
||||||
|
side side
|
||||||
|
cbuf *controlBuffer
|
||||||
|
sendQuota uint32
|
||||||
|
oiws uint32 // outbound initial window size.
|
||||||
|
estdStreams map[uint32]*outStream // Established streams.
|
||||||
|
activeStreams *outStreamList // Streams that are sending data.
|
||||||
|
framer *framer
|
||||||
|
hBuf *bytes.Buffer // The buffer for HPACK encoding.
|
||||||
|
hEnc *hpack.Encoder // HPACK encoder.
|
||||||
|
bdpEst *bdpEstimator
|
||||||
|
draining bool
|
||||||
|
|
||||||
|
// Side-specific handlers
|
||||||
|
ssGoAwayHandler func(*goAway) (bool, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator) *loopyWriter {
|
||||||
|
var buf bytes.Buffer
|
||||||
|
l := &loopyWriter{
|
||||||
|
side: s,
|
||||||
|
cbuf: cbuf,
|
||||||
|
sendQuota: defaultWindowSize,
|
||||||
|
oiws: defaultWindowSize,
|
||||||
|
estdStreams: make(map[uint32]*outStream),
|
||||||
|
activeStreams: newOutStreamList(),
|
||||||
|
framer: fr,
|
||||||
|
hBuf: &buf,
|
||||||
|
hEnc: hpack.NewEncoder(&buf),
|
||||||
|
bdpEst: bdpEst,
|
||||||
|
}
|
||||||
|
return l
|
||||||
|
}
|
||||||
|
|
||||||
|
const minBatchSize = 1000
|
||||||
|
|
||||||
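The run loop that follows drains the control buffer in two phases, a blocking get for the first item and then non-blocking gets while work remains, and it only flushes the framer once the in-memory batch reaches minBatchSize, yielding once with runtime.Gosched() to let producers top the batch up. The flush decision reduces to roughly this rule; shouldFlush is an invented helper that restates the logic, it does not exist in the file.

    // shouldFlush sketches the batching rule of loopyWriter.run: keep
    // accumulating while the buffered bytes are below the threshold and we
    // have not yet yielded once; otherwise hand the batch to the kernel.
    func shouldFlush(buffered int, yieldedOnce bool) bool {
    	const minBatchSize = 1000 // same constant as declared just above
    	return yieldedOnce || buffered >= minBatchSize
    }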
|
// run should be run in a separate goroutine.
|
||||||
|
func (l *loopyWriter) run() {
|
||||||
|
var (
|
||||||
|
it interface{}
|
||||||
|
err error
|
||||||
|
isEmpty bool
|
||||||
|
)
|
||||||
|
defer func() {
|
||||||
|
errorf("transport: loopyWriter.run returning. Err: %v", err)
|
||||||
|
}()
|
||||||
|
for {
|
||||||
|
it, err = l.cbuf.get(true)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if err = l.handle(it); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if _, err = l.processData(); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
gosched := true
|
||||||
|
hasdata:
|
||||||
|
for {
|
||||||
|
it, err = l.cbuf.get(false)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if it != nil {
|
||||||
|
if err = l.handle(it); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if _, err = l.processData(); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
continue hasdata
|
||||||
|
}
|
||||||
|
if isEmpty, err = l.processData(); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if !isEmpty {
|
||||||
|
continue hasdata
|
||||||
|
}
|
||||||
|
if gosched {
|
||||||
|
gosched = false
|
||||||
|
if l.framer.writer.offset < minBatchSize {
|
||||||
|
runtime.Gosched()
|
||||||
|
continue hasdata
|
||||||
|
}
|
||||||
|
}
|
||||||
|
l.framer.writer.Flush()
|
||||||
|
break hasdata
|
||||||
|
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *loopyWriter) outgoingWindowUpdateHandler(w *outgoingWindowUpdate) error {
|
||||||
|
return l.framer.fr.WriteWindowUpdate(w.streamID, w.increment)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *loopyWriter) incomingWindowUpdateHandler(w *incomingWindowUpdate) error {
|
||||||
|
// Otherwise update the quota.
|
||||||
|
if w.streamID == 0 {
|
||||||
|
l.sendQuota += w.increment
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
// Find the stream and update it.
|
||||||
|
if str, ok := l.estdStreams[w.streamID]; ok {
|
||||||
|
str.bytesOutStanding -= int(w.increment)
|
||||||
|
if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota > 0 && str.state == waitingOnStreamQuota {
|
||||||
|
str.state = active
|
||||||
|
l.activeStreams.enqueue(str)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *loopyWriter) outgoingSettingsHandler(s *outgoingSettings) error {
|
||||||
|
return l.framer.fr.WriteSettings(s.ss...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *loopyWriter) incomingSettingsHandler(s *incomingSettings) error {
|
||||||
|
if err := l.applySettings(s.ss); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return l.framer.fr.WriteSettingsAck()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *loopyWriter) headerHandler(h *headerFrame) error {
|
||||||
|
if l.side == serverSide {
|
||||||
|
if h.endStream { // Case 1.A: Server wants to close stream.
|
||||||
|
// Make sure it's not a trailers only response.
|
||||||
|
if str, ok := l.estdStreams[h.streamID]; ok {
|
||||||
|
if str.state != empty { // either active or waiting on stream quota.
|
||||||
|
// add it to str's list of items.
|
||||||
|
str.itl.enqueue(h)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err := l.writeHeader(h.streamID, h.endStream, h.hf, h.onWrite); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return l.cleanupStreamHandler(h.cleanup)
|
||||||
|
}
|
||||||
|
// Case 1.B: Server is responding back with headers.
|
||||||
|
str := &outStream{
|
||||||
|
state: empty,
|
||||||
|
itl: &itemList{},
|
||||||
|
wq: h.wq,
|
||||||
|
}
|
||||||
|
l.estdStreams[h.streamID] = str
|
||||||
|
return l.writeHeader(h.streamID, h.endStream, h.hf, h.onWrite)
|
||||||
|
}
|
||||||
|
// Case 2: Client wants to originate stream.
|
||||||
|
str := &outStream{
|
||||||
|
id: h.streamID,
|
||||||
|
state: empty,
|
||||||
|
itl: &itemList{},
|
||||||
|
wq: h.wq,
|
||||||
|
}
|
||||||
|
str.itl.enqueue(h)
|
||||||
|
return l.originateStream(str)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *loopyWriter) originateStream(str *outStream) error {
|
||||||
|
hdr := str.itl.dequeue().(*headerFrame)
|
||||||
|
sendPing, err := hdr.initStream(str.id)
|
||||||
|
if err != nil {
|
||||||
|
if err == ErrConnClosing {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
// Other errors(errStreamDrain) need not close transport.
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if err = l.writeHeader(str.id, hdr.endStream, hdr.hf, hdr.onWrite); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
l.estdStreams[str.id] = str
|
||||||
|
if sendPing {
|
||||||
|
return l.pingHandler(&ping{data: [8]byte{}})
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *loopyWriter) writeHeader(streamID uint32, endStream bool, hf []hpack.HeaderField, onWrite func()) error {
|
||||||
|
if onWrite != nil {
|
||||||
|
onWrite()
|
||||||
|
}
|
||||||
|
l.hBuf.Reset()
|
||||||
|
for _, f := range hf {
|
||||||
|
if err := l.hEnc.WriteField(f); err != nil {
|
||||||
|
warningf("transport: loopyWriter.writeHeader encountered error while encoding headers:", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
var (
|
||||||
|
err error
|
||||||
|
endHeaders, first bool
|
||||||
|
)
|
||||||
|
first = true
|
||||||
|
for !endHeaders {
|
||||||
|
size := l.hBuf.Len()
|
||||||
|
if size > http2MaxFrameLen {
|
||||||
|
size = http2MaxFrameLen
|
||||||
|
} else {
|
||||||
|
endHeaders = true
|
||||||
|
}
|
||||||
|
if first {
|
||||||
|
first = false
|
||||||
|
err = l.framer.fr.WriteHeaders(http2.HeadersFrameParam{
|
||||||
|
StreamID: streamID,
|
||||||
|
BlockFragment: l.hBuf.Next(size),
|
||||||
|
EndStream: endStream,
|
||||||
|
EndHeaders: endHeaders,
|
||||||
|
})
|
||||||
|
} else {
|
||||||
|
err = l.framer.fr.WriteContinuation(
|
||||||
|
streamID,
|
||||||
|
endHeaders,
|
||||||
|
l.hBuf.Next(size),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *loopyWriter) preprocessData(df *dataFrame) error {
|
||||||
|
str, ok := l.estdStreams[df.streamID]
|
||||||
|
if !ok {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
// If we got data for a stream it means that
|
||||||
|
// stream was originated and the headers were sent out.
|
||||||
|
str.itl.enqueue(df)
|
||||||
|
if str.state == empty {
|
||||||
|
str.state = active
|
||||||
|
l.activeStreams.enqueue(str)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *loopyWriter) pingHandler(p *ping) error {
|
||||||
|
if !p.ack {
|
||||||
|
l.bdpEst.timesnap(p.data)
|
||||||
|
}
|
||||||
|
return l.framer.fr.WritePing(p.ack, p.data)
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *loopyWriter) outFlowControlSizeRequestHandler(o *outFlowControlSizeRequest) error {
|
||||||
|
o.resp <- l.sendQuota
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error {
|
||||||
|
c.onWrite()
|
||||||
|
if str, ok := l.estdStreams[c.streamID]; ok {
|
||||||
|
// On the server side it could be a trailers-only response or
|
||||||
|
// a RST_STREAM before stream initialization thus the stream might
|
||||||
|
// not be established yet.
|
||||||
|
delete(l.estdStreams, c.streamID)
|
||||||
|
str.deleteSelf()
|
||||||
|
}
|
||||||
|
if c.rst { // If RST_STREAM needs to be sent.
|
||||||
|
if err := l.framer.fr.WriteRSTStream(c.streamID, c.rstCode); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if l.side == clientSide && l.draining && len(l.estdStreams) == 0 {
|
||||||
|
return ErrConnClosing
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *loopyWriter) incomingGoAwayHandler(*incomingGoAway) error {
|
||||||
|
if l.side == clientSide {
|
||||||
|
l.draining = true
|
||||||
|
if len(l.estdStreams) == 0 {
|
||||||
|
return ErrConnClosing
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *loopyWriter) goAwayHandler(g *goAway) error {
|
||||||
|
// Handling of outgoing GoAway is very specific to side.
|
||||||
|
if l.ssGoAwayHandler != nil {
|
||||||
|
draining, err := l.ssGoAwayHandler(g)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
l.draining = draining
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *loopyWriter) handle(i interface{}) error {
|
||||||
|
switch i := i.(type) {
|
||||||
|
case *incomingWindowUpdate:
|
||||||
|
return l.incomingWindowUpdateHandler(i)
|
||||||
|
case *outgoingWindowUpdate:
|
||||||
|
return l.outgoingWindowUpdateHandler(i)
|
||||||
|
case *incomingSettings:
|
||||||
|
return l.incomingSettingsHandler(i)
|
||||||
|
case *outgoingSettings:
|
||||||
|
return l.outgoingSettingsHandler(i)
|
||||||
|
case *headerFrame:
|
||||||
|
return l.headerHandler(i)
|
||||||
|
case *cleanupStream:
|
||||||
|
return l.cleanupStreamHandler(i)
|
||||||
|
case *incomingGoAway:
|
||||||
|
return l.incomingGoAwayHandler(i)
|
||||||
|
case *dataFrame:
|
||||||
|
return l.preprocessData(i)
|
||||||
|
case *ping:
|
||||||
|
return l.pingHandler(i)
|
||||||
|
case *goAway:
|
||||||
|
return l.goAwayHandler(i)
|
||||||
|
case *outFlowControlSizeRequest:
|
||||||
|
return l.outFlowControlSizeRequestHandler(i)
|
||||||
|
default:
|
||||||
|
return fmt.Errorf("transport: unknown control message type %T", i)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *loopyWriter) applySettings(ss []http2.Setting) error {
|
||||||
|
for _, s := range ss {
|
||||||
|
switch s.ID {
|
||||||
|
case http2.SettingInitialWindowSize:
|
||||||
|
o := l.oiws
|
||||||
|
l.oiws = s.Val
|
||||||
|
if o < l.oiws {
|
||||||
|
// If the new limit is greater make all depleted streams active.
|
||||||
|
for _, stream := range l.estdStreams {
|
||||||
|
if stream.state == waitingOnStreamQuota {
|
||||||
|
stream.state = active
|
||||||
|
l.activeStreams.enqueue(stream)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *loopyWriter) processData() (bool, error) {
|
||||||
|
if l.sendQuota == 0 {
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
str := l.activeStreams.dequeue()
|
||||||
|
if str == nil {
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
dataItem := str.itl.peek().(*dataFrame)
|
||||||
|
if len(dataItem.h) == 0 && len(dataItem.d) == 0 {
|
||||||
|
// Client sends out empty data frame with endStream = true
|
||||||
|
if err := l.framer.fr.WriteData(dataItem.streamID, dataItem.endStream, nil); err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
str.itl.dequeue()
|
||||||
|
if str.itl.isEmpty() {
|
||||||
|
str.state = empty
|
||||||
|
} else if trailer, ok := str.itl.peek().(*headerFrame); ok { // the next item is trailers.
|
||||||
|
if err := l.writeHeader(trailer.streamID, trailer.endStream, trailer.hf, trailer.onWrite); err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
if err := l.cleanupStreamHandler(trailer.cleanup); err != nil {
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
l.activeStreams.enqueue(str)
|
||||||
|
}
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
var (
|
||||||
|
idx int
|
||||||
|
buf []byte
|
||||||
|
)
|
||||||
|
if len(dataItem.h) != 0 { // data header has not been written out yet.
|
||||||
|
buf = dataItem.h
|
||||||
|
} else {
|
||||||
|
idx = 1
|
||||||
|
buf = dataItem.d
|
||||||
|
}
|
||||||
|
size := http2MaxFrameLen
|
||||||
|
if len(buf) < size {
|
||||||
|
size = len(buf)
|
||||||
|
}
|
||||||
|
if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota <= 0 {
|
||||||
|
str.state = waitingOnStreamQuota
|
||||||
|
return false, nil
|
||||||
|
} else if strQuota < size {
|
||||||
|
size = strQuota
|
||||||
|
}
|
||||||
|
|
||||||
|
if l.sendQuota < uint32(size) {
|
||||||
|
size = int(l.sendQuota)
|
||||||
|
}
|
||||||
|
// Now that outgoing flow controls are checked we can replenish str's write quota
|
||||||
|
str.wq.replenish(size)
|
||||||
|
var endStream bool
|
||||||
|
// This last data message on this stream and all
|
||||||
|
// of it can be written in this go.
|
||||||
|
if dataItem.endStream && size == len(buf) {
|
||||||
|
// buf contains either data or it contains header but data is empty.
|
||||||
|
if idx == 1 || len(dataItem.d) == 0 {
|
||||||
|
endStream = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if dataItem.onEachWrite != nil {
|
||||||
|
dataItem.onEachWrite()
|
||||||
|
}
|
||||||
|
if err := l.framer.fr.WriteData(dataItem.streamID, endStream, buf[:size]); err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
buf = buf[size:]
|
||||||
|
str.bytesOutStanding += size
|
||||||
|
l.sendQuota -= uint32(size)
|
||||||
|
if idx == 0 {
|
||||||
|
dataItem.h = buf
|
||||||
|
} else {
|
||||||
|
dataItem.d = buf
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // All the data from that message was written out.
|
||||||
|
str.itl.dequeue()
|
||||||
|
}
|
||||||
|
if str.itl.isEmpty() {
|
||||||
|
str.state = empty
|
||||||
|
} else if trailer, ok := str.itl.peek().(*headerFrame); ok { // The next item is trailers.
|
||||||
|
if err := l.writeHeader(trailer.streamID, trailer.endStream, trailer.hf, trailer.onWrite); err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
if err := l.cleanupStreamHandler(trailer.cleanup); err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
} else if int(l.oiws)-str.bytesOutStanding <= 0 { // Ran out of stream quota.
|
||||||
|
str.state = waitingOnStreamQuota
|
||||||
|
} else { // Otherwise add it back to the list of active streams.
|
||||||
|
l.activeStreams.enqueue(str)
|
||||||
|
}
|
||||||
|
return false, nil
|
||||||
|
}
|
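Closing note on controlbuf.go: processData caps every DATA frame by three independent budgets, the HTTP/2 maximum frame length, the per-stream quota (oiws minus the bytes already outstanding on that stream), and the connection-level sendQuota. A compact restatement of that sizing rule, written as an illustrative helper that is not in the file and ignores the early return when the stream quota is exhausted:

    // frameSize mirrors the clamping done in loopyWriter.processData: a DATA
    // frame may not exceed the frame-size limit, the remaining per-stream
    // quota, or the remaining connection-level quota.
    func frameSize(pending, maxFrameLen, streamQuota int, connQuota uint32) int {
    	size := pending
    	if size > maxFrameLen {
    		size = maxFrameLen
    	}
    	if size > streamQuota {
    		size = streamQuota
    	}
    	if uint32(size) > connQuota {
    		size = int(connQuota)
    	}
    	return size
    }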
@ -20,13 +20,10 @@ package transport
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
|
||||||
"math"
|
"math"
|
||||||
"sync"
|
"sync"
|
||||||
|
"sync/atomic"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"golang.org/x/net/http2"
|
|
||||||
"golang.org/x/net/http2/hpack"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
@ -36,202 +33,109 @@ const (
|
|||||||
initialWindowSize = defaultWindowSize // for an RPC
|
initialWindowSize = defaultWindowSize // for an RPC
|
||||||
infinity = time.Duration(math.MaxInt64)
|
infinity = time.Duration(math.MaxInt64)
|
||||||
defaultClientKeepaliveTime = infinity
|
defaultClientKeepaliveTime = infinity
|
||||||
defaultClientKeepaliveTimeout = time.Duration(20 * time.Second)
|
defaultClientKeepaliveTimeout = 20 * time.Second
|
||||||
defaultMaxStreamsClient = 100
|
defaultMaxStreamsClient = 100
|
||||||
defaultMaxConnectionIdle = infinity
|
defaultMaxConnectionIdle = infinity
|
||||||
defaultMaxConnectionAge = infinity
|
defaultMaxConnectionAge = infinity
|
||||||
defaultMaxConnectionAgeGrace = infinity
|
defaultMaxConnectionAgeGrace = infinity
|
||||||
defaultServerKeepaliveTime = time.Duration(2 * time.Hour)
|
defaultServerKeepaliveTime = 2 * time.Hour
|
||||||
defaultServerKeepaliveTimeout = time.Duration(20 * time.Second)
|
defaultServerKeepaliveTimeout = 20 * time.Second
|
||||||
defaultKeepalivePolicyMinTime = time.Duration(5 * time.Minute)
|
defaultKeepalivePolicyMinTime = 5 * time.Minute
|
||||||
// max window limit set by HTTP2 Specs.
|
// max window limit set by HTTP2 Specs.
|
||||||
maxWindowSize = math.MaxInt32
|
maxWindowSize = math.MaxInt32
|
||||||
// defaultLocalSendQuota sets is default value for number of data
|
// defaultWriteQuota is the default value for number of data
|
||||||
// bytes that each stream can schedule before some of it being
|
// bytes that each stream can schedule before some of it being
|
||||||
// flushed out.
|
// flushed out.
|
||||||
defaultLocalSendQuota = 128 * 1024
|
defaultWriteQuota = 64 * 1024
|
||||||
)
|
)
|
||||||
|
|
||||||
// The following defines various control items which could flow through
|
// writeQuota is a soft limit on the amount of data a stream can
|
||||||
// the control buffer of transport. They represent different aspects of
|
// schedule before some of it is written out.
|
||||||
// control tasks, e.g., flow control, settings, streaming resetting, etc.
|
type writeQuota struct {
|
||||||
|
quota int32
|
||||||
type headerFrame struct {
|
// get waits on read from when quota goes less than or equal to zero.
|
||||||
streamID uint32
|
// replenish writes on it when quota goes positive again.
|
||||||
hf []hpack.HeaderField
|
ch chan struct{}
|
||||||
endStream bool
|
// done is triggered in error case.
|
||||||
|
done <-chan struct{}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (*headerFrame) item() {}
|
func newWriteQuota(sz int32, done <-chan struct{}) *writeQuota {
|
||||||
|
return &writeQuota{
|
||||||
type continuationFrame struct {
|
quota: sz,
|
||||||
streamID uint32
|
ch: make(chan struct{}, 1),
|
||||||
endHeaders bool
|
done: done,
|
||||||
headerBlockFragment []byte
|
|
||||||
}
|
|
||||||
|
|
||||||
type dataFrame struct {
|
|
||||||
streamID uint32
|
|
||||||
endStream bool
|
|
||||||
d []byte
|
|
||||||
f func()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*dataFrame) item() {}
|
|
||||||
|
|
||||||
func (*continuationFrame) item() {}
|
|
||||||
|
|
||||||
type windowUpdate struct {
|
|
||||||
streamID uint32
|
|
||||||
increment uint32
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*windowUpdate) item() {}
|
|
||||||
|
|
||||||
type settings struct {
|
|
||||||
ss []http2.Setting
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*settings) item() {}
|
|
||||||
|
|
||||||
type settingsAck struct {
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*settingsAck) item() {}
|
|
||||||
|
|
||||||
type resetStream struct {
|
|
||||||
streamID uint32
|
|
||||||
code http2.ErrCode
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*resetStream) item() {}
|
|
||||||
|
|
||||||
type goAway struct {
|
|
||||||
code http2.ErrCode
|
|
||||||
debugData []byte
|
|
||||||
headsUp bool
|
|
||||||
closeConn bool
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*goAway) item() {}
|
|
||||||
|
|
||||||
type flushIO struct {
|
|
||||||
closeTr bool
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*flushIO) item() {}
|
|
||||||
|
|
||||||
type ping struct {
|
|
||||||
ack bool
|
|
||||||
data [8]byte
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*ping) item() {}
|
|
||||||
|
|
||||||
// quotaPool is a pool which accumulates the quota and sends it to acquire()
|
|
||||||
// when it is available.
|
|
||||||
type quotaPool struct {
|
|
||||||
mu sync.Mutex
|
|
||||||
c chan struct{}
|
|
||||||
version uint32
|
|
||||||
quota int
|
|
||||||
}
|
|
||||||
|
|
||||||
// newQuotaPool creates a quotaPool which has quota q available to consume.
|
|
||||||
func newQuotaPool(q int) *quotaPool {
|
|
||||||
qb := "aPool{
|
|
||||||
quota: q,
|
|
||||||
c: make(chan struct{}, 1),
|
|
||||||
}
|
}
|
||||||
return qb
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// add cancels the pending quota sent on acquired, incremented by v and sends
|
func (w *writeQuota) get(sz int32) error {
|
||||||
// it back on acquire.
|
for {
|
||||||
func (qb *quotaPool) add(v int) {
|
if atomic.LoadInt32(&w.quota) > 0 {
|
||||||
qb.mu.Lock()
|
atomic.AddInt32(&w.quota, -sz)
|
||||||
defer qb.mu.Unlock()
|
return nil
|
||||||
qb.lockedAdd(v)
|
}
|
||||||
}
|
|
||||||
|
|
||||||
func (qb *quotaPool) lockedAdd(v int) {
|
|
||||||
var wakeUp bool
|
|
||||||
if qb.quota <= 0 {
|
|
||||||
wakeUp = true // Wake up potential waiters.
|
|
||||||
}
|
|
||||||
qb.quota += v
|
|
||||||
if wakeUp && qb.quota > 0 {
|
|
||||||
select {
|
select {
|
||||||
case qb.c <- struct{}{}:
|
case <-w.ch:
|
||||||
|
continue
|
||||||
|
case <-w.done:
|
||||||
|
return errStreamDone
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *writeQuota) replenish(n int) {
|
||||||
|
sz := int32(n)
|
||||||
|
a := atomic.AddInt32(&w.quota, sz)
|
||||||
|
b := a - sz
|
||||||
|
if b <= 0 && a > 0 {
|
||||||
|
select {
|
||||||
|
case w.ch <- struct{}{}:
|
||||||
default:
|
default:
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
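writeQuota above replaces the old mutex-based quotaPool with an atomic counter plus a one-slot wake-up channel: get spins on the atomic and parks on ch only when the balance is non-positive, and replenish signals only on the transition from non-positive to positive. That transition check, pulled out for illustration (signalOnTransition is an invented name; it mirrors the body of replenish):

    import "sync/atomic"

    // signalOnTransition sketches writeQuota.replenish: after atomically adding
    // n to the quota, wake a waiter only when the balance crossed from <= 0 to
    // > 0, so at most one wake-up is ever outstanding on the channel.
    func signalOnTransition(quota *int32, n int32, ch chan struct{}) {
    	after := atomic.AddInt32(quota, n)
    	before := after - n
    	if before <= 0 && after > 0 {
    		select {
    		case ch <- struct{}{}:
    		default:
    		}
    	}
    }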
func (qb *quotaPool) addAndUpdate(v int) {
|
type trInFlow struct {
|
||||||
qb.mu.Lock()
|
limit uint32
|
||||||
qb.lockedAdd(v)
|
unacked uint32
|
||||||
qb.version++
|
effectiveWindowSize uint32
|
||||||
qb.mu.Unlock()
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (qb *quotaPool) get(v int, wc waiters) (int, uint32, error) {
|
func (f *trInFlow) newLimit(n uint32) uint32 {
|
||||||
qb.mu.Lock()
|
d := n - f.limit
|
||||||
if qb.quota > 0 {
|
f.limit = n
|
||||||
if v > qb.quota {
|
f.updateEffectiveWindowSize()
|
||||||
v = qb.quota
|
return d
|
||||||
}
|
|
||||||
qb.quota -= v
|
|
||||||
ver := qb.version
|
|
||||||
qb.mu.Unlock()
|
|
||||||
return v, ver, nil
|
|
||||||
}
|
|
||||||
qb.mu.Unlock()
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case <-wc.ctx.Done():
|
|
||||||
return 0, 0, ContextErr(wc.ctx.Err())
|
|
||||||
case <-wc.tctx.Done():
|
|
||||||
return 0, 0, ErrConnClosing
|
|
||||||
case <-wc.done:
|
|
||||||
return 0, 0, io.EOF
|
|
||||||
case <-wc.goAway:
|
|
||||||
return 0, 0, errStreamDrain
|
|
||||||
case <-qb.c:
|
|
||||||
qb.mu.Lock()
|
|
||||||
if qb.quota > 0 {
|
|
||||||
if v > qb.quota {
|
|
||||||
v = qb.quota
|
|
||||||
}
|
|
||||||
qb.quota -= v
|
|
||||||
ver := qb.version
|
|
||||||
if qb.quota > 0 {
|
|
||||||
select {
|
|
||||||
case qb.c <- struct{}{}:
|
|
||||||
default:
|
|
||||||
}
|
|
||||||
}
|
|
||||||
qb.mu.Unlock()
|
|
||||||
return v, ver, nil
|
|
||||||
|
|
||||||
}
|
|
||||||
qb.mu.Unlock()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (qb *quotaPool) compareAndExecute(version uint32, success, failure func()) bool {
|
func (f *trInFlow) onData(n uint32) uint32 {
|
||||||
qb.mu.Lock()
|
f.unacked += n
|
||||||
if version == qb.version {
|
if f.unacked >= f.limit/4 {
|
||||||
success()
|
w := f.unacked
|
||||||
qb.mu.Unlock()
|
f.unacked = 0
|
||||||
return true
|
f.updateEffectiveWindowSize()
|
||||||
|
return w
|
||||||
}
|
}
|
||||||
failure()
|
f.updateEffectiveWindowSize()
|
||||||
qb.mu.Unlock()
|
return 0
|
||||||
return false
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (f *trInFlow) reset() uint32 {
|
||||||
|
w := f.unacked
|
||||||
|
f.unacked = 0
|
||||||
|
f.updateEffectiveWindowSize()
|
||||||
|
return w
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *trInFlow) updateEffectiveWindowSize() {
|
||||||
|
atomic.StoreUint32(&f.effectiveWindowSize, f.limit-f.unacked)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *trInFlow) getSize() uint32 {
|
||||||
|
return atomic.LoadUint32(&f.effectiveWindowSize)
|
||||||
|
}
|
||||||
|
|
||||||
|
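trInFlow above batches transport-level window updates: received bytes accumulate in unacked and a WINDOW_UPDATE is emitted only once a quarter of the window limit has been consumed, which keeps update frames roughly proportional to throughput instead of one per DATA frame. The onData rule restated as a tiny helper (ackThreshold is invented, not part of the file):

    // ackThreshold mirrors trInFlow.onData: return the window update to send
    // (the caller then resets its counter) once unacked bytes reach a quarter
    // of the limit, otherwise send nothing yet.
    func ackThreshold(unacked, limit uint32) uint32 {
    	if unacked >= limit/4 {
    		return unacked
    	}
    	return 0
    }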
// TODO(mmukhi): Simplify this code.
|
||||||
// inFlow deals with inbound flow control
|
// inFlow deals with inbound flow control
|
||||||
type inFlow struct {
|
type inFlow struct {
|
||||||
mu sync.Mutex
|
mu sync.Mutex
|
||||||
@ -252,9 +156,9 @@ type inFlow struct {
|
|||||||
// It assumes that n is always greater than the old limit.
|
// It assumes that n is always greater than the old limit.
|
||||||
func (f *inFlow) newLimit(n uint32) uint32 {
|
func (f *inFlow) newLimit(n uint32) uint32 {
|
||||||
f.mu.Lock()
|
f.mu.Lock()
|
||||||
defer f.mu.Unlock()
|
|
||||||
d := n - f.limit
|
d := n - f.limit
|
||||||
f.limit = n
|
f.limit = n
|
||||||
|
f.mu.Unlock()
|
||||||
return d
|
return d
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -263,7 +167,6 @@ func (f *inFlow) maybeAdjust(n uint32) uint32 {
|
|||||||
n = uint32(math.MaxInt32)
|
n = uint32(math.MaxInt32)
|
||||||
}
|
}
|
||||||
f.mu.Lock()
|
f.mu.Lock()
|
||||||
defer f.mu.Unlock()
|
|
||||||
// estSenderQuota is the receiver's view of the maximum number of bytes the sender
|
// estSenderQuota is the receiver's view of the maximum number of bytes the sender
|
||||||
// can send without a window update.
|
// can send without a window update.
|
||||||
estSenderQuota := int32(f.limit - (f.pendingData + f.pendingUpdate))
|
estSenderQuota := int32(f.limit - (f.pendingData + f.pendingUpdate))
|
||||||
@ -275,7 +178,7 @@ func (f *inFlow) maybeAdjust(n uint32) uint32 {
|
|||||||
// for this message. Therefore we must send an update over the limit since there's an active read
|
// for this message. Therefore we must send an update over the limit since there's an active read
|
||||||
// request from the application.
|
// request from the application.
|
||||||
if estUntransmittedData > estSenderQuota {
|
if estUntransmittedData > estSenderQuota {
|
||||||
// Sender's window shouldn't go more than 2^31 - 1 as speecified in the HTTP spec.
|
// Sender's window shouldn't go more than 2^31 - 1 as specified in the HTTP spec.
|
||||||
if f.limit+n > maxWindowSize {
|
if f.limit+n > maxWindowSize {
|
||||||
f.delta = maxWindowSize - f.limit
|
f.delta = maxWindowSize - f.limit
|
||||||
} else {
|
} else {
|
||||||
@ -284,19 +187,24 @@ func (f *inFlow) maybeAdjust(n uint32) uint32 {
|
|||||||
// is padded; We will fallback on the current available window(at least a 1/4th of the limit).
|
// is padded; We will fallback on the current available window(at least a 1/4th of the limit).
|
||||||
f.delta = n
|
f.delta = n
|
||||||
}
|
}
|
||||||
|
f.mu.Unlock()
|
||||||
return f.delta
|
return f.delta
|
||||||
}
|
}
|
||||||
|
f.mu.Unlock()
|
||||||
return 0
|
return 0
|
||||||
}
|
}
|
||||||
|
|
||||||
// onData is invoked when some data frame is received. It updates pendingData.
|
// onData is invoked when some data frame is received. It updates pendingData.
|
||||||
func (f *inFlow) onData(n uint32) error {
|
func (f *inFlow) onData(n uint32) error {
|
||||||
f.mu.Lock()
|
f.mu.Lock()
|
||||||
defer f.mu.Unlock()
|
|
||||||
f.pendingData += n
|
f.pendingData += n
|
||||||
if f.pendingData+f.pendingUpdate > f.limit+f.delta {
|
if f.pendingData+f.pendingUpdate > f.limit+f.delta {
|
||||||
return fmt.Errorf("received %d-bytes data exceeding the limit %d bytes", f.pendingData+f.pendingUpdate, f.limit)
|
limit := f.limit
|
||||||
|
rcvd := f.pendingData + f.pendingUpdate
|
||||||
|
f.mu.Unlock()
|
||||||
|
return fmt.Errorf("received %d-bytes data exceeding the limit %d bytes", rcvd, limit)
|
||||||
}
|
}
|
||||||
|
f.mu.Unlock()
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -304,8 +212,8 @@ func (f *inFlow) onData(n uint32) error {
|
|||||||
// to be sent to the peer.
|
// to be sent to the peer.
|
||||||
func (f *inFlow) onRead(n uint32) uint32 {
|
func (f *inFlow) onRead(n uint32) uint32 {
|
||||||
f.mu.Lock()
|
f.mu.Lock()
|
||||||
defer f.mu.Unlock()
|
|
||||||
if f.pendingData == 0 {
|
if f.pendingData == 0 {
|
||||||
|
f.mu.Unlock()
|
||||||
return 0
|
return 0
|
||||||
}
|
}
|
||||||
f.pendingData -= n
|
f.pendingData -= n
|
||||||
@ -320,15 +228,9 @@ func (f *inFlow) onRead(n uint32) uint32 {
|
|||||||
if f.pendingUpdate >= f.limit/4 {
|
if f.pendingUpdate >= f.limit/4 {
|
||||||
wu := f.pendingUpdate
|
wu := f.pendingUpdate
|
||||||
f.pendingUpdate = 0
|
f.pendingUpdate = 0
|
||||||
|
f.mu.Unlock()
|
||||||
return wu
|
return wu
|
||||||
}
|
}
|
||||||
|
f.mu.Unlock()
|
||||||
return 0
|
return 0
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *inFlow) resetPendingUpdate() uint32 {
|
|
||||||
f.mu.Lock()
|
|
||||||
defer f.mu.Unlock()
|
|
||||||
n := f.pendingUpdate
|
|
||||||
f.pendingUpdate = 0
|
|
||||||
return n
|
|
||||||
}
|
|
13 vendor/google.golang.org/grpc/transport/handler_server.go (generated, vendored)
@ -92,13 +92,13 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats sta
|
|||||||
}
|
}
|
||||||
for k, vv := range r.Header {
|
for k, vv := range r.Header {
|
||||||
k = strings.ToLower(k)
|
k = strings.ToLower(k)
|
||||||
if isReservedHeader(k) && !isWhitelistedPseudoHeader(k) {
|
if isReservedHeader(k) && !isWhitelistedHeader(k) {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
for _, v := range vv {
|
for _, v := range vv {
|
||||||
v, err := decodeMetadataHeader(k, v)
|
v, err := decodeMetadataHeader(k, v)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, streamErrorf(codes.InvalidArgument, "malformed binary metadata: %v", err)
|
return nil, streamErrorf(codes.Internal, "malformed binary metadata: %v", err)
|
||||||
}
|
}
|
||||||
metakv = append(metakv, k, v)
|
metakv = append(metakv, k, v)
|
||||||
}
|
}
|
||||||
@ -354,8 +354,7 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace
|
|||||||
pr.AuthInfo = credentials.TLSInfo{State: *req.TLS}
|
pr.AuthInfo = credentials.TLSInfo{State: *req.TLS}
|
||||||
}
|
}
|
||||||
ctx = metadata.NewIncomingContext(ctx, ht.headerMD)
|
ctx = metadata.NewIncomingContext(ctx, ht.headerMD)
|
||||||
ctx = peer.NewContext(ctx, pr)
|
s.ctx = peer.NewContext(ctx, pr)
|
||||||
s.ctx = newContextWithStream(ctx, s)
|
|
||||||
if ht.stats != nil {
|
if ht.stats != nil {
|
||||||
s.ctx = ht.stats.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method})
|
s.ctx = ht.stats.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method})
|
||||||
inHeader := &stats.InHeader{
|
inHeader := &stats.InHeader{
|
||||||
@ -366,7 +365,7 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace
|
|||||||
ht.stats.HandleRPC(s.ctx, inHeader)
|
ht.stats.HandleRPC(s.ctx, inHeader)
|
||||||
}
|
}
|
||||||
s.trReader = &transportReader{
|
s.trReader = &transportReader{
|
||||||
reader: &recvBufferReader{ctx: s.ctx, recv: s.buf},
|
reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf},
|
||||||
windowHandler: func(int) {},
|
windowHandler: func(int) {},
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -421,6 +420,10 @@ func (ht *serverHandlerTransport) runStream() {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (ht *serverHandlerTransport) IncrMsgSent() {}
|
||||||
|
|
||||||
|
func (ht *serverHandlerTransport) IncrMsgRecv() {}
|
||||||
|
|
||||||
func (ht *serverHandlerTransport) Drain() {
|
func (ht *serverHandlerTransport) Drain() {
|
||||||
panic("Drain() is not implemented")
|
panic("Drain() is not implemented")
|
||||||
}
|
}
|
||||||
|
1077 vendor/google.golang.org/grpc/transport/http2_client.go (generated, vendored): file diff suppressed because it is too large
722 vendor/google.golang.org/grpc/transport/http2_server.go (generated, vendored): file diff suppressed because it is too large
80 vendor/google.golang.org/grpc/transport/http_util.go (generated, vendored)
@ -23,7 +23,6 @@ import (
|
|||||||
"bytes"
|
"bytes"
|
||||||
"encoding/base64"
|
"encoding/base64"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
|
||||||
"net"
|
"net"
|
||||||
"net/http"
|
"net/http"
|
||||||
"strconv"
|
"strconv"
|
||||||
@ -70,7 +69,7 @@ var (
|
|||||||
http2.ErrCodeConnect: codes.Internal,
|
http2.ErrCodeConnect: codes.Internal,
|
||||||
http2.ErrCodeEnhanceYourCalm: codes.ResourceExhausted,
|
http2.ErrCodeEnhanceYourCalm: codes.ResourceExhausted,
|
||||||
http2.ErrCodeInadequateSecurity: codes.PermissionDenied,
|
http2.ErrCodeInadequateSecurity: codes.PermissionDenied,
|
||||||
http2.ErrCodeHTTP11Required: codes.FailedPrecondition,
|
http2.ErrCodeHTTP11Required: codes.Internal,
|
||||||
}
|
}
|
||||||
statusCodeConvTab = map[codes.Code]http2.ErrCode{
|
statusCodeConvTab = map[codes.Code]http2.ErrCode{
|
||||||
codes.Internal: http2.ErrCodeInternal,
|
codes.Internal: http2.ErrCodeInternal,
|
||||||
@ -132,6 +131,7 @@ func isReservedHeader(hdr string) bool {
|
|||||||
}
|
}
|
||||||
switch hdr {
|
switch hdr {
|
||||||
case "content-type",
|
case "content-type",
|
||||||
|
"user-agent",
|
||||||
"grpc-message-type",
|
"grpc-message-type",
|
||||||
"grpc-encoding",
|
"grpc-encoding",
|
||||||
"grpc-message",
|
"grpc-message",
|
||||||
@ -145,11 +145,11 @@ func isReservedHeader(hdr string) bool {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// isWhitelistedPseudoHeader checks whether hdr belongs to HTTP2 pseudoheaders
|
// isWhitelistedHeader checks whether hdr should be propagated
|
||||||
// that should be propagated into metadata visible to users.
|
// into metadata visible to users.
|
||||||
func isWhitelistedPseudoHeader(hdr string) bool {
|
func isWhitelistedHeader(hdr string) bool {
|
||||||
switch hdr {
|
switch hdr {
|
||||||
case ":authority":
|
case ":authority", "user-agent":
|
||||||
return true
|
return true
|
||||||
default:
|
default:
|
||||||
return false
|
return false
|
||||||
@ -262,9 +262,9 @@ func (d *decodeState) decodeResponseHeader(frame *http2.MetaHeadersFrame) error
|
|||||||
// gRPC status doesn't exist and http status is OK.
|
// gRPC status doesn't exist and http status is OK.
|
||||||
// Set rawStatusCode to be unknown and return nil error.
|
// Set rawStatusCode to be unknown and return nil error.
|
||||||
// So that, if the stream has ended this Unknown status
|
// So that, if the stream has ended this Unknown status
|
||||||
// will be propogated to the user.
|
// will be propagated to the user.
|
||||||
// Otherwise, it will be ignored. In which case, status from
|
// Otherwise, it will be ignored. In which case, status from
|
||||||
// a later trailer, that has StreamEnded flag set, is propogated.
|
// a later trailer, that has StreamEnded flag set, is propagated.
|
||||||
code := int(codes.Unknown)
|
code := int(codes.Unknown)
|
||||||
d.rawStatusCode = &code
|
d.rawStatusCode = &code
|
||||||
return nil
|
return nil
|
||||||
@ -283,7 +283,7 @@ func (d *decodeState) processHeaderField(f hpack.HeaderField) error {
|
|||||||
case "content-type":
|
case "content-type":
|
||||||
contentSubtype, validContentType := contentSubtype(f.Value)
|
contentSubtype, validContentType := contentSubtype(f.Value)
|
||||||
if !validContentType {
|
if !validContentType {
|
||||||
return streamErrorf(codes.FailedPrecondition, "transport: received the unexpected content-type %q", f.Value)
|
return streamErrorf(codes.Internal, "transport: received the unexpected content-type %q", f.Value)
|
||||||
}
|
}
|
||||||
d.contentSubtype = contentSubtype
|
d.contentSubtype = contentSubtype
|
||||||
// TODO: do we want to propagate the whole content-type in the metadata,
|
// TODO: do we want to propagate the whole content-type in the metadata,
|
||||||
@ -340,7 +340,7 @@ func (d *decodeState) processHeaderField(f hpack.HeaderField) error {
|
|||||||
d.statsTrace = v
|
d.statsTrace = v
|
||||||
d.addMetadata(f.Name, string(v))
|
d.addMetadata(f.Name, string(v))
|
||||||
default:
|
default:
|
||||||
if isReservedHeader(f.Name) && !isWhitelistedPseudoHeader(f.Name) {
|
if isReservedHeader(f.Name) && !isWhitelistedHeader(f.Name) {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
v, err := decodeMetadataHeader(f.Name, f.Value)
|
v, err := decodeMetadataHeader(f.Name, f.Value)
|
||||||
@ -348,7 +348,7 @@ func (d *decodeState) processHeaderField(f hpack.HeaderField) error {
|
|||||||
errorf("Failed to decode metadata header (%q, %q): %v", f.Name, f.Value, err)
|
errorf("Failed to decode metadata header (%q, %q): %v", f.Name, f.Value, err)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
d.addMetadata(f.Name, string(v))
|
d.addMetadata(f.Name, v)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@ -509,19 +509,63 @@ func decodeGrpcMessageUnchecked(msg string) string {
|
|||||||
return buf.String()
|
return buf.String()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type bufWriter struct {
|
||||||
|
buf []byte
|
||||||
|
offset int
|
||||||
|
batchSize int
|
||||||
|
conn net.Conn
|
||||||
|
err error
|
||||||
|
|
||||||
|
onFlush func()
|
||||||
|
}
|
||||||
|
|
||||||
|
func newBufWriter(conn net.Conn, batchSize int) *bufWriter {
|
||||||
|
return &bufWriter{
|
||||||
|
buf: make([]byte, batchSize*2),
|
||||||
|
batchSize: batchSize,
|
||||||
|
conn: conn,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *bufWriter) Write(b []byte) (n int, err error) {
|
||||||
|
if w.err != nil {
|
||||||
|
return 0, w.err
|
||||||
|
}
|
||||||
|
n = copy(w.buf[w.offset:], b)
|
||||||
|
w.offset += n
|
||||||
|
if w.offset >= w.batchSize {
|
||||||
|
err = w.Flush()
|
||||||
|
}
|
||||||
|
return n, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *bufWriter) Flush() error {
|
||||||
|
if w.err != nil {
|
||||||
|
return w.err
|
||||||
|
}
|
||||||
|
if w.offset == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if w.onFlush != nil {
|
||||||
|
w.onFlush()
|
||||||
|
}
|
||||||
|
_, w.err = w.conn.Write(w.buf[:w.offset])
|
||||||
|
w.offset = 0
|
||||||
|
return w.err
|
||||||
|
}
|
||||||
|
|
||||||
type framer struct {
|
type framer struct {
|
||||||
numWriters int32
|
writer *bufWriter
|
||||||
reader io.Reader
|
fr *http2.Framer
|
||||||
writer *bufio.Writer
|
|
||||||
fr *http2.Framer
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func newFramer(conn net.Conn, writeBufferSize, readBufferSize int) *framer {
|
func newFramer(conn net.Conn, writeBufferSize, readBufferSize int) *framer {
|
||||||
|
r := bufio.NewReaderSize(conn, readBufferSize)
|
||||||
|
w := newBufWriter(conn, writeBufferSize)
|
||||||
f := &framer{
|
f := &framer{
|
||||||
reader: bufio.NewReaderSize(conn, readBufferSize),
|
writer: w,
|
||||||
writer: bufio.NewWriterSize(conn, writeBufferSize),
|
fr: http2.NewFramer(w, r),
|
||||||
}
|
}
|
||||||
f.fr = http2.NewFramer(f.writer, f.reader)
|
|
||||||
// Opt-in to Frame reuse API on framer to reduce garbage.
|
// Opt-in to Frame reuse API on framer to reduce garbage.
|
||||||
// Frames aren't safe to read from after a subsequent call to ReadFrame.
|
// Frames aren't safe to read from after a subsequent call to ReadFrame.
|
||||||
f.fr.SetReuseFrames()
|
f.fr.SetReuseFrames()
|
||||||
|
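Closing note on http_util.go: the new framer wraps the connection in a bufWriter whose buffer is twice the batch size and which only touches the socket once offset crosses batchSize or on an explicit Flush, replacing bufio.Writer so the transport controls exactly when bytes go out. A rough usage sketch, written as if inside the transport package since newBufWriter is unexported; writeThenFlush and the 32 KiB batch size are invented for illustration, and the payload is assumed to fit in one batch buffer.

    // Sketch of how the pieces above fit together: bytes accumulate in the
    // bufWriter and reach conn only when a batch fills or a flush is forced.
    func writeThenFlush(conn net.Conn, payload []byte) error {
    	w := newBufWriter(conn, 32*1024) // batch size chosen for the example
    	if _, err := w.Write(payload); err != nil {
    		return err
    	}
    	return w.Flush() // force out anything below the batch threshold
    }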
261 vendor/google.golang.org/grpc/transport/transport.go (generated, vendored)
@ -19,16 +19,17 @@
 // Package transport defines and implements message oriented communication
 // channel to complete various transactions (e.g., an RPC). It is meant for
 // grpc-internal usage and is not intended to be imported directly by users.
-package transport // import "google.golang.org/grpc/transport"
+package transport // externally used as import "google.golang.org/grpc/transport"
 
 import (
+	"errors"
 	"fmt"
 	"io"
 	"net"
 	"sync"
+	"sync/atomic"
 
 	"golang.org/x/net/context"
-	"golang.org/x/net/http2"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/credentials"
 	"google.golang.org/grpc/keepalive"
@ -57,6 +58,7 @@ type recvBuffer struct {
 	c       chan recvMsg
 	mu      sync.Mutex
 	backlog []recvMsg
+	err     error
 }
 
 func newRecvBuffer() *recvBuffer {
@ -68,6 +70,13 @@ func newRecvBuffer() *recvBuffer {
 
 func (b *recvBuffer) put(r recvMsg) {
 	b.mu.Lock()
+	if b.err != nil {
+		b.mu.Unlock()
+		// An error had occurred earlier, don't accept more
+		// data or errors.
+		return
+	}
+	b.err = r.err
 	if len(b.backlog) == 0 {
 		select {
 		case b.c <- r:
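The new err field turns recvBuffer into an error-latching queue: once a message carrying an error has been accepted, every later put is silently dropped. A compact sketch of that latch pattern, independent of the transport types (latchQueue and msg are illustrative names, not the vendored ones):

```go
package main

import (
	"errors"
	"fmt"
	"sync"
)

type msg struct {
	data []byte
	err  error
}

// latchQueue drops every enqueue that arrives after the first message whose
// err field is non-nil, mirroring the guard added to recvBuffer.put above.
type latchQueue struct {
	mu      sync.Mutex
	backlog []msg
	err     error
}

func (q *latchQueue) put(m msg) {
	q.mu.Lock()
	defer q.mu.Unlock()
	if q.err != nil {
		return // an error was already recorded; accept no more data or errors
	}
	q.err = m.err
	q.backlog = append(q.backlog, m)
}

func main() {
	q := &latchQueue{}
	q.put(msg{data: []byte("a")})
	q.put(msg{err: errors.New("stream reset")}) // latches the error
	q.put(msg{data: []byte("ignored")})         // dropped
	fmt.Println(len(q.backlog), q.err)          // 2 stream reset
}
```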
@ -101,14 +110,15 @@ func (b *recvBuffer) get() <-chan recvMsg {
 	return b.c
 }
 
+//
 // recvBufferReader implements io.Reader interface to read the data from
 // recvBuffer.
 type recvBufferReader struct {
 	ctx     context.Context
-	goAway  chan struct{}
+	ctxDone <-chan struct{} // cache of ctx.Done() (for performance).
 	recv    *recvBuffer
 	last    []byte // Stores the remaining data in the previous calls.
 	err     error
 }
 
 // Read reads the next len(p) bytes from last. If last is drained, it tries to
@ -130,10 +140,8 @@ func (r *recvBufferReader) read(p []byte) (n int, err error) {
 		return copied, nil
 	}
 	select {
-	case <-r.ctx.Done():
+	case <-r.ctxDone:
 		return 0, ContextErr(r.ctx.Err())
-	case <-r.goAway:
-		return 0, errStreamDrain
 	case m := <-r.recv.get():
 		r.recv.load()
 		if m.err != nil {
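Caching ctx.Done() in a field (ctxDone) means the channel is looked up once instead of on every pass through a hot select loop. A small sketch of the pattern in isolation; the consume function and its channels are illustrative, and the actual saving depends on the Go version and context implementation:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// consume drains a work channel; the ctx.Done() channel is fetched once,
// outside the loop, rather than calling the method on every iteration.
func consume(ctx context.Context, work <-chan int) int {
	done := ctx.Done() // cached, mirroring the ctxDone field above
	total := 0
	for {
		select {
		case <-done:
			return total
		case v := <-work:
			total += v
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()
	work := make(chan int)
	go func() {
		for i := 0; i < 3; i++ {
			work <- i
		}
	}()
	fmt.Println(consume(ctx, work)) // 3, then returns once the context expires
}
```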
@ -145,61 +153,7 @@ func (r *recvBufferReader) read(p []byte) (n int, err error) {
 	}
 }
 
-// All items in an out of a controlBuffer should be the same type.
-type item interface {
-	item()
-}
-
-// controlBuffer is an unbounded channel of item.
-type controlBuffer struct {
-	c       chan item
-	mu      sync.Mutex
-	backlog []item
-}
-
-func newControlBuffer() *controlBuffer {
-	b := &controlBuffer{
-		c: make(chan item, 1),
-	}
-	return b
-}
-
-func (b *controlBuffer) put(r item) {
-	b.mu.Lock()
-	if len(b.backlog) == 0 {
-		select {
-		case b.c <- r:
-			b.mu.Unlock()
-			return
-		default:
-		}
-	}
-	b.backlog = append(b.backlog, r)
-	b.mu.Unlock()
-}
-
-func (b *controlBuffer) load() {
-	b.mu.Lock()
-	if len(b.backlog) > 0 {
-		select {
-		case b.c <- b.backlog[0]:
-			b.backlog[0] = nil
-			b.backlog = b.backlog[1:]
-		default:
-		}
-	}
-	b.mu.Unlock()
-}
-
-// get returns the channel that receives an item in the buffer.
-//
-// Upon receipt of an item, the caller should call load to send another
-// item onto the channel if there is any.
-func (b *controlBuffer) get() <-chan item {
-	return b.c
-}
-
-type streamState uint8
+type streamState uint32
 
 const (
 	streamActive streamState = iota
@ -214,8 +168,8 @@ type Stream struct {
 	st     ServerTransport    // nil for client side Stream
 	ctx    context.Context    // the associated context of the stream
 	cancel context.CancelFunc // always nil for client side Stream
-	done   chan struct{}      // closed when the final status arrives
-	goAway chan struct{}      // closed when a GOAWAY control message is received
+	done    chan struct{}   // closed at the end of stream to unblock writers. On the client side.
+	ctxDone <-chan struct{} // same as done chan but for server side. Cache of ctx.Done() (for performance)
 	method       string // the associated RPC method of the stream
 	recvCompress string
 	sendCompress string
@ -223,47 +177,51 @@ type Stream struct {
 	trReader  io.Reader
 	fc        *inFlow
 	recvQuota uint32
-	waiters   waiters
+	wq        *writeQuota
 
 	// Callback to state application's intentions to read data. This
 	// is used to adjust flow control, if needed.
 	requestRead func(int)
 
-	sendQuotaPool *quotaPool
-	headerChan    chan struct{} // closed to indicate the end of header metadata.
-	headerDone    bool          // set when headerChan is closed. Used to avoid closing headerChan multiple times.
-	header        metadata.MD   // the received header metadata.
-	trailer       metadata.MD   // the key-value map of trailer metadata.
+	headerChan chan struct{} // closed to indicate the end of header metadata.
+	headerDone uint32        // set when headerChan is closed. Used to avoid closing headerChan multiple times.
+	header     metadata.MD   // the received header metadata.
+	trailer    metadata.MD   // the key-value map of trailer metadata.
 
-	mu       sync.RWMutex // guard the following
-	headerOk bool         // becomes true from the first header is about to send
+	headerOk bool // becomes true from the first header is about to send
 	state    streamState
 
 	status *status.Status // the status error received from the server
 
-	rstStream bool          // indicates whether a RST_STREAM frame needs to be sent
-	rstError  http2.ErrCode // the error that needs to be sent along with the RST_STREAM frame
-
-	bytesReceived bool // indicates whether any bytes have been received on this stream
-	unprocessed   bool // set if the server sends a refused stream or GOAWAY including this stream
+	bytesReceived uint32 // indicates whether any bytes have been received on this stream
+	unprocessed   uint32 // set if the server sends a refused stream or GOAWAY including this stream
 
 	// contentSubtype is the content-subtype for requests.
 	// this must be lowercase or the behavior is undefined.
 	contentSubtype string
 }
 
+func (s *Stream) swapState(st streamState) streamState {
+	return streamState(atomic.SwapUint32((*uint32)(&s.state), uint32(st)))
+}
+
+func (s *Stream) compareAndSwapState(oldState, newState streamState) bool {
+	return atomic.CompareAndSwapUint32((*uint32)(&s.state), uint32(oldState), uint32(newState))
+}
+
+func (s *Stream) getState() streamState {
+	return streamState(atomic.LoadUint32((*uint32)(&s.state)))
+}
+
 func (s *Stream) waitOnHeader() error {
 	if s.headerChan == nil {
 		// On the server headerChan is always nil since a stream originates
 		// only after having received headers.
 		return nil
 	}
-	wc := s.waiters
 	select {
-	case <-wc.ctx.Done():
-		return ContextErr(wc.ctx.Err())
-	case <-wc.goAway:
-		return errStreamDrain
+	case <-s.ctx.Done():
+		return ContextErr(s.ctx.Err())
 	case <-s.headerChan:
 		return nil
 	}
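The core of this hunk is moving Stream.state (and the bytesReceived/unprocessed flags) from mutex-guarded fields to sync/atomic operations on uint32, with swapState, compareAndSwapState, and getState replacing lock/unlock pairs on hot paths. A self-contained sketch of the same technique with hypothetical state names, not the vendored constants:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

type state uint32

const (
	active state = iota
	writeDone
	readDone
	done
)

// stream keeps its state in a uint32 so transitions need no mutex.
type stream struct {
	state state
}

func (s *stream) swapState(st state) state {
	return state(atomic.SwapUint32((*uint32)(&s.state), uint32(st)))
}

func (s *stream) compareAndSwapState(oldS, newS state) bool {
	return atomic.CompareAndSwapUint32((*uint32)(&s.state), uint32(oldS), uint32(newS))
}

func (s *stream) getState() state {
	return state(atomic.LoadUint32((*uint32)(&s.state)))
}

func main() {
	s := &stream{}
	fmt.Println(s.compareAndSwapState(active, writeDone)) // true: half-close the write side
	fmt.Println(s.compareAndSwapState(active, readDone))  // false: state is no longer active
	old := s.swapState(done)                              // unconditionally finish the stream
	fmt.Println(old == writeDone, s.getState() == done)   // true true
}
```

Compare-and-swap gives the same "only one transition wins" guarantee the mutex used to provide, without serializing unrelated readers of the field.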
@ -289,12 +247,6 @@ func (s *Stream) Done() <-chan struct{} {
 	return s.done
 }
 
-// GoAway returns a channel which is closed when the server sent GoAways signal
-// before this stream was initiated.
-func (s *Stream) GoAway() <-chan struct{} {
-	return s.goAway
-}
-
 // Header acquires the key-value pairs of header metadata once it
 // is available. It blocks until i) the metadata is ready or ii) there is no
 // header metadata or iii) the stream is canceled/expired.
@ -303,6 +255,9 @@ func (s *Stream) Header() (metadata.MD, error) {
 	// Even if the stream is closed, header is returned if available.
 	select {
 	case <-s.headerChan:
+		if s.header == nil {
+			return nil, nil
+		}
 		return s.header.Copy(), nil
 	default:
 	}
@ -312,10 +267,10 @@ func (s *Stream) Header() (metadata.MD, error) {
 // Trailer returns the cached trailer metedata. Note that if it is not called
 // after the entire stream is done, it could return an empty MD. Client
 // side only.
+// It can be safely read only after stream has ended that is either read
+// or write have returned io.EOF.
 func (s *Stream) Trailer() metadata.MD {
-	s.mu.RLock()
 	c := s.trailer.Copy()
-	s.mu.RUnlock()
 	return c
 }
 
@ -345,36 +300,42 @@ func (s *Stream) Method() string {
 }
 
 // Status returns the status received from the server.
+// Status can be read safely only after the stream has ended,
+// that is, read or write has returned io.EOF.
 func (s *Stream) Status() *status.Status {
 	return s.status
 }
 
 // SetHeader sets the header metadata. This can be called multiple times.
 // Server side only.
+// This should not be called in parallel to other data writes.
 func (s *Stream) SetHeader(md metadata.MD) error {
-	s.mu.Lock()
-	if s.headerOk || s.state == streamDone {
-		s.mu.Unlock()
-		return ErrIllegalHeaderWrite
-	}
 	if md.Len() == 0 {
-		s.mu.Unlock()
 		return nil
 	}
+	if s.headerOk || atomic.LoadUint32((*uint32)(&s.state)) == uint32(streamDone) {
+		return ErrIllegalHeaderWrite
+	}
 	s.header = metadata.Join(s.header, md)
-	s.mu.Unlock()
 	return nil
 }
 
+// SendHeader sends the given header metadata. The given metadata is
+// combined with any metadata set by previous calls to SetHeader and
+// then written to the transport stream.
+func (s *Stream) SendHeader(md metadata.MD) error {
+	t := s.ServerTransport()
+	return t.WriteHeader(s, md)
+}
+
+// SetTrailer sets the trailer metadata which will be sent with the RPC status
// by the server. This can be called multiple times. Server side only.
+// This should not be called parallel to other data writes.
 func (s *Stream) SetTrailer(md metadata.MD) error {
 	if md.Len() == 0 {
 		return nil
 	}
-	s.mu.Lock()
 	s.trailer = metadata.Join(s.trailer, md)
-	s.mu.Unlock()
 	return nil
 }
 
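SetHeader, SendHeader, and SetTrailer are the transport-level hooks that server handlers reach through the public grpc package. A hedged sketch of how they are typically exercised from application code via grpc.SetHeader and grpc.SetTrailer; the interceptor and metadata keys here are placeholders, and no RPC is actually served:

```go
package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc"
	"google.golang.org/grpc/metadata"
)

// annotate is a unary server interceptor that attaches header and trailer
// metadata to every RPC; grpc.SetHeader and grpc.SetTrailer ultimately call
// into the transport Stream methods shown above.
func annotate(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
	if err := grpc.SetHeader(ctx, metadata.Pairs("server-version", "dev")); err != nil {
		return nil, err
	}
	resp, err := handler(ctx, req)
	_ = grpc.SetTrailer(ctx, metadata.Pairs("handled", "true"))
	return resp, err
}

func main() {
	s := grpc.NewServer(grpc.UnaryInterceptor(annotate))
	fmt.Printf("%T configured with a metadata interceptor\n", s)
}
```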
@ -414,29 +375,15 @@ func (t *transportReader) Read(p []byte) (n int, err error) {
 	return
 }
 
-// finish sets the stream's state and status, and closes the done channel.
-// s.mu must be held by the caller. st must always be non-nil.
-func (s *Stream) finish(st *status.Status) {
-	s.status = st
-	s.state = streamDone
-	close(s.done)
-}
-
 // BytesReceived indicates whether any bytes have been received on this stream.
 func (s *Stream) BytesReceived() bool {
-	s.mu.Lock()
-	br := s.bytesReceived
-	s.mu.Unlock()
-	return br
+	return atomic.LoadUint32(&s.bytesReceived) == 1
 }
 
 // Unprocessed indicates whether the server did not process this stream --
 // i.e. it sent a refused stream or GOAWAY including this stream ID.
 func (s *Stream) Unprocessed() bool {
-	s.mu.Lock()
-	br := s.unprocessed
-	s.mu.Unlock()
-	return br
+	return atomic.LoadUint32(&s.unprocessed) == 1
 }
 
 // GoString is implemented by Stream so context.String() won't
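This is the same shift as the state field: a bool guarded by s.mu becomes a uint32 flag that is read with atomic.LoadUint32 and set elsewhere with atomic.StoreUint32. A minimal sketch of that flag pattern with hypothetical names:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// flags holds a mutex-free boolean: 0 means false, 1 means true.
type flags struct {
	bytesReceived uint32
}

func (f *flags) markBytesReceived() { atomic.StoreUint32(&f.bytesReceived, 1) }
func (f *flags) BytesReceived() bool { return atomic.LoadUint32(&f.bytesReceived) == 1 }

func main() {
	var f flags
	fmt.Println(f.BytesReceived()) // false
	f.markBytesReceived()
	fmt.Println(f.BytesReceived()) // true
}
```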
@ -445,21 +392,6 @@ func (s *Stream) GoString() string {
 	return fmt.Sprintf("<stream: %p, %v>", s, s.method)
 }
 
-// The key to save transport.Stream in the context.
-type streamKey struct{}
-
-// newContextWithStream creates a new context from ctx and attaches stream
-// to it.
-func newContextWithStream(ctx context.Context, stream *Stream) context.Context {
-	return context.WithValue(ctx, streamKey{}, stream)
-}
-
-// StreamFromContext returns the stream saved in ctx.
-func StreamFromContext(ctx context.Context) (s *Stream, ok bool) {
-	s, ok = ctx.Value(streamKey{}).(*Stream)
-	return
-}
-
 // state of transport
 type transportState int
 
@ -481,6 +413,7 @@ type ServerConfig struct {
 	InitialConnWindowSize int32
 	WriteBufferSize       int
 	ReadBufferSize        int
+	ChannelzParentID      int64
 }
 
 // NewServerTransport creates a ServerTransport with conn or non-nil error
@ -516,6 +449,8 @@ type ConnectOptions struct {
 	WriteBufferSize int
 	// ReadBufferSize sets the size of read buffer, which in turn determines how much data can be read at most for one read syscall.
 	ReadBufferSize int
+	// ChannelzParentID sets the addrConn id which initiate the creation of this client transport.
+	ChannelzParentID int64
 }
 
 // TargetInfo contains the information of the target such as network address and metadata.
@ -615,6 +550,12 @@ type ClientTransport interface {
 
 	// GetGoAwayReason returns the reason why GoAway frame was received.
 	GetGoAwayReason() GoAwayReason
+
+	// IncrMsgSent increments the number of message sent through this transport.
+	IncrMsgSent()
+
+	// IncrMsgRecv increments the number of message received through this transport.
+	IncrMsgRecv()
 }
 
 // ServerTransport is the common interface for all gRPC server-side transport
@ -648,6 +589,12 @@ type ServerTransport interface {
 
 	// Drain notifies the client this ServerTransport stops accepting new RPCs.
 	Drain()
+
+	// IncrMsgSent increments the number of message sent through this transport.
+	IncrMsgSent()
+
+	// IncrMsgRecv increments the number of message received through this transport.
+	IncrMsgRecv()
 }
 
 // streamErrorf creates an StreamError with the specified error code and description.
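IncrMsgSent and IncrMsgRecv (together with the new ChannelzParentID fields) let the transports feed channelz-style metrics. A small sketch of a transport-like type satisfying such a counting interface with atomic counters; the interface and type names here are stand-ins, not the vendored ones:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// msgCounter is a stand-in for the message-counting half of the transport interfaces.
type msgCounter interface {
	IncrMsgSent()
	IncrMsgRecv()
}

type fakeTransport struct {
	msgSent int64
	msgRecv int64
}

func (t *fakeTransport) IncrMsgSent() { atomic.AddInt64(&t.msgSent, 1) }
func (t *fakeTransport) IncrMsgRecv() { atomic.AddInt64(&t.msgRecv, 1) }

func main() {
	var c msgCounter = &fakeTransport{}
	for i := 0; i < 3; i++ {
		c.IncrMsgSent()
	}
	c.IncrMsgRecv()
	t := c.(*fakeTransport)
	fmt.Println(atomic.LoadInt64(&t.msgSent), atomic.LoadInt64(&t.msgRecv)) // 3 1
}
```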
@ -701,6 +648,9 @@ var (
 	// connection is draining. This could be caused by goaway or balancer
 	// removing the address.
 	errStreamDrain = streamErrorf(codes.Unavailable, "the connection is draining")
+	// errStreamDone is returned from write at the client side to indiacte application
+	// layer of an error.
+	errStreamDone = errors.New("the stream is done")
 	// StatusGoAway indicates that the server sent a GOAWAY that included this
 	// stream's ID in unprocessed RPCs.
 	statusGoAway = status.New(codes.Unavailable, "the stream is rejected because server is draining the connection")
@ -718,15 +668,6 @@ func (e StreamError) Error() string {
 	return fmt.Sprintf("stream error: code = %s desc = %q", e.Code, e.Desc)
 }
 
-// waiters are passed to quotaPool get methods to
-// wait on in addition to waiting on quota.
-type waiters struct {
-	ctx    context.Context
-	tctx   context.Context
-	done   chan struct{}
-	goAway chan struct{}
-}
-
 // GoAwayReason contains the reason for the GoAway frame received.
 type GoAwayReason uint8
 
@ -740,39 +681,3 @@ const (
 	// "too_many_pings".
 	GoAwayTooManyPings GoAwayReason = 2
 )
-
-// loopyWriter is run in a separate go routine. It is the single code path that will
-// write data on wire.
-func loopyWriter(ctx context.Context, cbuf *controlBuffer, handler func(item) error) {
-	for {
-		select {
-		case i := <-cbuf.get():
-			cbuf.load()
-			if err := handler(i); err != nil {
-				errorf("transport: Error while handling item. Err: %v", err)
-				return
-			}
-		case <-ctx.Done():
-			return
-		}
-	hasData:
-		for {
-			select {
-			case i := <-cbuf.get():
-				cbuf.load()
-				if err := handler(i); err != nil {
-					errorf("transport: Error while handling item. Err: %v", err)
-					return
-				}
-			case <-ctx.Done():
-				return
-			default:
-				if err := handler(&flushIO{}); err != nil {
-					errorf("transport: Error while flushing. Err: %v", err)
-					return
-				}
-				break hasData
-			}
-		}
-	}
-}