vendor: update grpc dependencies
Signed-off-by: Stephen J Day <stephen.day@docker.com>
This commit is contained in:
		| @@ -12,7 +12,7 @@ github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9 | ||||
| github.com/matttproud/golang_protobuf_extensions v1.0.0 | ||||
| github.com/docker/go-units v0.3.1 | ||||
| github.com/gogo/protobuf d2e1ade2d719b78fe5b061b4c18a9f7111b5bdc8 | ||||
| github.com/golang/protobuf 8ee79997227bf9b34611aee7946ae64735e6fd93 | ||||
| github.com/golang/protobuf 7a211bcf3bce0e3f1d74f9894916e6f116ae83b4 | ||||
| github.com/opencontainers/runtime-spec v1.0.0-rc5 | ||||
| github.com/opencontainers/runc 639454475cb9c8b861cc599f8bcd5c8c790ae402 | ||||
| github.com/Sirupsen/logrus v0.11.0 | ||||
| @@ -22,8 +22,8 @@ github.com/davecgh/go-spew v1.1.0 | ||||
| github.com/pmezard/go-difflib v1.0.0 | ||||
| github.com/containerd/fifo 69b99525e472735860a5269b75af1970142b3062 | ||||
| github.com/urfave/cli 8ba6f23b6e36d03666a14bd9421f5e3efcb59aca | ||||
| golang.org/x/net 8b4af36cd21a1f85a7484b49feb7c79363106d8e | ||||
| google.golang.org/grpc v1.0.5 | ||||
| golang.org/x/net 7dcfb8076726a3fdd9353b6b8a1f1b6be6811bd6 | ||||
| google.golang.org/grpc v1.3.0 | ||||
| github.com/pkg/errors v0.8.0 | ||||
| github.com/nightlyone/lockfile 1d49c987357a327b5b03aa84cbddd582c328615d | ||||
| github.com/opencontainers/go-digest 21dfd564fd89c944783d00d069f33e3e7123c448 | ||||
| @@ -37,3 +37,5 @@ github.com/Microsoft/go-winio v0.4.1 | ||||
| github.com/boltdb/bolt e9cf4fae01b5a8ff89d0ec6b32f0d9c9f79aefdd | ||||
| github.com/Microsoft/hcsshim v0.5.15 | ||||
| github.com/Azure/go-ansiterm fa152c58bc15761d0200cb75fe958b89a9d4888e | ||||
| google.golang.org/genproto d80a6e20e776b0b17a324d0ba1ab50a39c8e8944 | ||||
| golang.org/x/text 19e51611da83d6be54ddafce4a4af510cb3e9ea4 | ||||
|   | ||||
							
								
								
									
										2
									
								
								vendor/github.com/golang/protobuf/README.md
									
									
									
										generated
									
									
										vendored
									
									
								
							
							
						
						
									
										2
									
								
								vendor/github.com/golang/protobuf/README.md
									
									
									
										generated
									
									
										vendored
									
									
								
							| @@ -22,7 +22,7 @@ To use this software, you must: | ||||
|   for details or, if you are using gccgo, follow the instructions at | ||||
| 	https://golang.org/doc/install/gccgo | ||||
| - Grab the code from the repository and install the proto package. | ||||
|   The simplest way is to run `go get -u github.com/golang/protobuf/{proto,protoc-gen-go}`. | ||||
|   The simplest way is to run `go get -u github.com/golang/protobuf/protoc-gen-go`. | ||||
|   The compiler plugin, protoc-gen-go, will be installed in $GOBIN, | ||||
|   defaulting to $GOPATH/bin.  It must be in your $PATH for the protocol | ||||
|   compiler, protoc, to find it. | ||||
|   | ||||
							
								
								
									
										168
									
								
								vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										168
									
								
								vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,168 @@ | ||||
| // Code generated by protoc-gen-go. DO NOT EDIT. | ||||
| // source: github.com/golang/protobuf/ptypes/any/any.proto | ||||
|  | ||||
| /* | ||||
| Package any is a generated protocol buffer package. | ||||
|  | ||||
| It is generated from these files: | ||||
| 	github.com/golang/protobuf/ptypes/any/any.proto | ||||
|  | ||||
| It has these top-level messages: | ||||
| 	Any | ||||
| */ | ||||
| package any | ||||
|  | ||||
| import proto "github.com/golang/protobuf/proto" | ||||
| import fmt "fmt" | ||||
| import math "math" | ||||
|  | ||||
| // Reference imports to suppress errors if they are not otherwise used. | ||||
| var _ = proto.Marshal | ||||
| var _ = fmt.Errorf | ||||
| var _ = math.Inf | ||||
|  | ||||
| // This is a compile-time assertion to ensure that this generated file | ||||
| // is compatible with the proto package it is being compiled against. | ||||
| // A compilation error at this line likely means your copy of the | ||||
| // proto package needs to be updated. | ||||
| const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package | ||||
|  | ||||
| // `Any` contains an arbitrary serialized protocol buffer message along with a | ||||
| // URL that describes the type of the serialized message. | ||||
| // | ||||
| // Protobuf library provides support to pack/unpack Any values in the form | ||||
| // of utility functions or additional generated methods of the Any type. | ||||
| // | ||||
| // Example 1: Pack and unpack a message in C++. | ||||
| // | ||||
| //     Foo foo = ...; | ||||
| //     Any any; | ||||
| //     any.PackFrom(foo); | ||||
| //     ... | ||||
| //     if (any.UnpackTo(&foo)) { | ||||
| //       ... | ||||
| //     } | ||||
| // | ||||
| // Example 2: Pack and unpack a message in Java. | ||||
| // | ||||
| //     Foo foo = ...; | ||||
| //     Any any = Any.pack(foo); | ||||
| //     ... | ||||
| //     if (any.is(Foo.class)) { | ||||
| //       foo = any.unpack(Foo.class); | ||||
| //     } | ||||
| // | ||||
| //  Example 3: Pack and unpack a message in Python. | ||||
| // | ||||
| //     foo = Foo(...) | ||||
| //     any = Any() | ||||
| //     any.Pack(foo) | ||||
| //     ... | ||||
| //     if any.Is(Foo.DESCRIPTOR): | ||||
| //       any.Unpack(foo) | ||||
| //       ... | ||||
| // | ||||
| // The pack methods provided by protobuf library will by default use | ||||
| // 'type.googleapis.com/full.type.name' as the type URL and the unpack | ||||
| // methods only use the fully qualified type name after the last '/' | ||||
| // in the type URL, for example "foo.bar.com/x/y.z" will yield type | ||||
| // name "y.z". | ||||
| // | ||||
| // | ||||
| // JSON | ||||
| // ==== | ||||
| // The JSON representation of an `Any` value uses the regular | ||||
| // representation of the deserialized, embedded message, with an | ||||
| // additional field `@type` which contains the type URL. Example: | ||||
| // | ||||
| //     package google.profile; | ||||
| //     message Person { | ||||
| //       string first_name = 1; | ||||
| //       string last_name = 2; | ||||
| //     } | ||||
| // | ||||
| //     { | ||||
| //       "@type": "type.googleapis.com/google.profile.Person", | ||||
| //       "firstName": <string>, | ||||
| //       "lastName": <string> | ||||
| //     } | ||||
| // | ||||
| // If the embedded message type is well-known and has a custom JSON | ||||
| // representation, that representation will be embedded adding a field | ||||
| // `value` which holds the custom JSON in addition to the `@type` | ||||
| // field. Example (for message [google.protobuf.Duration][]): | ||||
| // | ||||
| //     { | ||||
| //       "@type": "type.googleapis.com/google.protobuf.Duration", | ||||
| //       "value": "1.212s" | ||||
| //     } | ||||
| // | ||||
| type Any struct { | ||||
| 	// A URL/resource name whose content describes the type of the | ||||
| 	// serialized protocol buffer message. | ||||
| 	// | ||||
| 	// For URLs which use the scheme `http`, `https`, or no scheme, the | ||||
| 	// following restrictions and interpretations apply: | ||||
| 	// | ||||
| 	// * If no scheme is provided, `https` is assumed. | ||||
| 	// * The last segment of the URL's path must represent the fully | ||||
| 	//   qualified name of the type (as in `path/google.protobuf.Duration`). | ||||
| 	//   The name should be in a canonical form (e.g., leading "." is | ||||
| 	//   not accepted). | ||||
| 	// * An HTTP GET on the URL must yield a [google.protobuf.Type][] | ||||
| 	//   value in binary format, or produce an error. | ||||
| 	// * Applications are allowed to cache lookup results based on the | ||||
| 	//   URL, or have them precompiled into a binary to avoid any | ||||
| 	//   lookup. Therefore, binary compatibility needs to be preserved | ||||
| 	//   on changes to types. (Use versioned type names to manage | ||||
| 	//   breaking changes.) | ||||
| 	// | ||||
| 	// Schemes other than `http`, `https` (or the empty scheme) might be | ||||
| 	// used with implementation specific semantics. | ||||
| 	// | ||||
| 	TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl" json:"type_url,omitempty"` | ||||
| 	// Must be a valid serialized protocol buffer of the above specified type. | ||||
| 	Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` | ||||
| } | ||||
|  | ||||
| func (m *Any) Reset()                    { *m = Any{} } | ||||
| func (m *Any) String() string            { return proto.CompactTextString(m) } | ||||
| func (*Any) ProtoMessage()               {} | ||||
| func (*Any) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } | ||||
| func (*Any) XXX_WellKnownType() string   { return "Any" } | ||||
|  | ||||
| func (m *Any) GetTypeUrl() string { | ||||
| 	if m != nil { | ||||
| 		return m.TypeUrl | ||||
| 	} | ||||
| 	return "" | ||||
| } | ||||
|  | ||||
| func (m *Any) GetValue() []byte { | ||||
| 	if m != nil { | ||||
| 		return m.Value | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func init() { | ||||
| 	proto.RegisterType((*Any)(nil), "google.protobuf.Any") | ||||
| } | ||||
|  | ||||
| func init() { proto.RegisterFile("github.com/golang/protobuf/ptypes/any/any.proto", fileDescriptor0) } | ||||
|  | ||||
| var fileDescriptor0 = []byte{ | ||||
| 	// 184 bytes of a gzipped FileDescriptorProto | ||||
| 	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0x4f, 0xcf, 0x2c, 0xc9, | ||||
| 	0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28, | ||||
| 	0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x4f, 0xcc, | ||||
| 	0xab, 0x04, 0x61, 0x3d, 0xb0, 0xb8, 0x10, 0x7f, 0x7a, 0x7e, 0x7e, 0x7a, 0x4e, 0xaa, 0x1e, 0x4c, | ||||
| 	0x95, 0x92, 0x19, 0x17, 0xb3, 0x63, 0x5e, 0xa5, 0x90, 0x24, 0x17, 0x07, 0x48, 0x79, 0x7c, 0x69, | ||||
| 	0x51, 0x8e, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x67, 0x10, 0x3b, 0x88, 0x1f, 0x5a, 0x94, 0x23, 0x24, | ||||
| 	0xc2, 0xc5, 0x5a, 0x96, 0x98, 0x53, 0x9a, 0x2a, 0xc1, 0xa4, 0xc0, 0xa8, 0xc1, 0x13, 0x04, 0xe1, | ||||
| 	0x38, 0xe5, 0x73, 0x09, 0x27, 0xe7, 0xe7, 0xea, 0xa1, 0x19, 0xe7, 0xc4, 0xe1, 0x98, 0x57, 0x19, | ||||
| 	0x00, 0xe2, 0x04, 0x30, 0x46, 0xa9, 0x12, 0xe5, 0xb8, 0x45, 0x4c, 0xcc, 0xee, 0x01, 0x4e, 0xab, | ||||
| 	0x98, 0xe4, 0xdc, 0x21, 0x46, 0x05, 0x40, 0x95, 0xe8, 0x85, 0xa7, 0xe6, 0xe4, 0x78, 0xe7, 0xe5, | ||||
| 	0x97, 0xe7, 0x85, 0x80, 0x94, 0x26, 0xb1, 0x81, 0xf5, 0x1a, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, | ||||
| 	0x45, 0x1f, 0x1a, 0xf2, 0xf3, 0x00, 0x00, 0x00, | ||||
| } | ||||
							
								
								
									
										139
									
								
								vendor/github.com/golang/protobuf/ptypes/any/any.proto
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										139
									
								
								vendor/github.com/golang/protobuf/ptypes/any/any.proto
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,139 @@ | ||||
| // Protocol Buffers - Google's data interchange format | ||||
| // Copyright 2008 Google Inc.  All rights reserved. | ||||
| // https://developers.google.com/protocol-buffers/ | ||||
| // | ||||
| // Redistribution and use in source and binary forms, with or without | ||||
| // modification, are permitted provided that the following conditions are | ||||
| // met: | ||||
| // | ||||
| //     * Redistributions of source code must retain the above copyright | ||||
| // notice, this list of conditions and the following disclaimer. | ||||
| //     * Redistributions in binary form must reproduce the above | ||||
| // copyright notice, this list of conditions and the following disclaimer | ||||
| // in the documentation and/or other materials provided with the | ||||
| // distribution. | ||||
| //     * Neither the name of Google Inc. nor the names of its | ||||
| // contributors may be used to endorse or promote products derived from | ||||
| // this software without specific prior written permission. | ||||
| // | ||||
| // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||||
| // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||||
| // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||||
| // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||||
| // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||||
| // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||||
| // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||||
| // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||||
| // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||||
| // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||||
| // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||||
|  | ||||
| syntax = "proto3"; | ||||
|  | ||||
| package google.protobuf; | ||||
|  | ||||
| option csharp_namespace = "Google.Protobuf.WellKnownTypes"; | ||||
| option go_package = "github.com/golang/protobuf/ptypes/any"; | ||||
| option java_package = "com.google.protobuf"; | ||||
| option java_outer_classname = "AnyProto"; | ||||
| option java_multiple_files = true; | ||||
| option objc_class_prefix = "GPB"; | ||||
|  | ||||
| // `Any` contains an arbitrary serialized protocol buffer message along with a | ||||
| // URL that describes the type of the serialized message. | ||||
| // | ||||
| // Protobuf library provides support to pack/unpack Any values in the form | ||||
| // of utility functions or additional generated methods of the Any type. | ||||
| // | ||||
| // Example 1: Pack and unpack a message in C++. | ||||
| // | ||||
| //     Foo foo = ...; | ||||
| //     Any any; | ||||
| //     any.PackFrom(foo); | ||||
| //     ... | ||||
| //     if (any.UnpackTo(&foo)) { | ||||
| //       ... | ||||
| //     } | ||||
| // | ||||
| // Example 2: Pack and unpack a message in Java. | ||||
| // | ||||
| //     Foo foo = ...; | ||||
| //     Any any = Any.pack(foo); | ||||
| //     ... | ||||
| //     if (any.is(Foo.class)) { | ||||
| //       foo = any.unpack(Foo.class); | ||||
| //     } | ||||
| // | ||||
| //  Example 3: Pack and unpack a message in Python. | ||||
| // | ||||
| //     foo = Foo(...) | ||||
| //     any = Any() | ||||
| //     any.Pack(foo) | ||||
| //     ... | ||||
| //     if any.Is(Foo.DESCRIPTOR): | ||||
| //       any.Unpack(foo) | ||||
| //       ... | ||||
| // | ||||
| // The pack methods provided by protobuf library will by default use | ||||
| // 'type.googleapis.com/full.type.name' as the type URL and the unpack | ||||
| // methods only use the fully qualified type name after the last '/' | ||||
| // in the type URL, for example "foo.bar.com/x/y.z" will yield type | ||||
| // name "y.z". | ||||
| // | ||||
| // | ||||
| // JSON | ||||
| // ==== | ||||
| // The JSON representation of an `Any` value uses the regular | ||||
| // representation of the deserialized, embedded message, with an | ||||
| // additional field `@type` which contains the type URL. Example: | ||||
| // | ||||
| //     package google.profile; | ||||
| //     message Person { | ||||
| //       string first_name = 1; | ||||
| //       string last_name = 2; | ||||
| //     } | ||||
| // | ||||
| //     { | ||||
| //       "@type": "type.googleapis.com/google.profile.Person", | ||||
| //       "firstName": <string>, | ||||
| //       "lastName": <string> | ||||
| //     } | ||||
| // | ||||
| // If the embedded message type is well-known and has a custom JSON | ||||
| // representation, that representation will be embedded adding a field | ||||
| // `value` which holds the custom JSON in addition to the `@type` | ||||
| // field. Example (for message [google.protobuf.Duration][]): | ||||
| // | ||||
| //     { | ||||
| //       "@type": "type.googleapis.com/google.protobuf.Duration", | ||||
| //       "value": "1.212s" | ||||
| //     } | ||||
| // | ||||
| message Any { | ||||
|   // A URL/resource name whose content describes the type of the | ||||
|   // serialized protocol buffer message. | ||||
|   // | ||||
|   // For URLs which use the scheme `http`, `https`, or no scheme, the | ||||
|   // following restrictions and interpretations apply: | ||||
|   // | ||||
|   // * If no scheme is provided, `https` is assumed. | ||||
|   // * The last segment of the URL's path must represent the fully | ||||
|   //   qualified name of the type (as in `path/google.protobuf.Duration`). | ||||
|   //   The name should be in a canonical form (e.g., leading "." is | ||||
|   //   not accepted). | ||||
|   // * An HTTP GET on the URL must yield a [google.protobuf.Type][] | ||||
|   //   value in binary format, or produce an error. | ||||
|   // * Applications are allowed to cache lookup results based on the | ||||
|   //   URL, or have them precompiled into a binary to avoid any | ||||
|   //   lookup. Therefore, binary compatibility needs to be preserved | ||||
|   //   on changes to types. (Use versioned type names to manage | ||||
|   //   breaking changes.) | ||||
|   // | ||||
|   // Schemes other than `http`, `https` (or the empty scheme) might be | ||||
|   // used with implementation specific semantics. | ||||
|   // | ||||
|   string type_url = 1; | ||||
|  | ||||
|   // Must be a valid serialized protocol buffer of the above specified type. | ||||
|   bytes value = 2; | ||||
| } | ||||
							
								
								
									
										17
									
								
								vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go
									
									
									
										generated
									
									
										vendored
									
									
								
							
							
						
						
									
										17
									
								
								vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go
									
									
									
										generated
									
									
										vendored
									
									
								
							| @@ -1,6 +1,5 @@ | ||||
| // Code generated by protoc-gen-go. | ||||
| // Code generated by protoc-gen-go. DO NOT EDIT. | ||||
| // source: github.com/golang/protobuf/ptypes/empty/empty.proto | ||||
| // DO NOT EDIT! | ||||
|  | ||||
| /* | ||||
| Package empty is a generated protocol buffer package. | ||||
| @@ -55,15 +54,15 @@ func init() { | ||||
| } | ||||
|  | ||||
| var fileDescriptor0 = []byte{ | ||||
| 	// 150 bytes of a gzipped FileDescriptorProto | ||||
| 	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0x32, 0x4e, 0xcf, 0x2c, 0xc9, | ||||
| 	// 147 bytes of a gzipped FileDescriptorProto | ||||
| 	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x32, 0x4e, 0xcf, 0x2c, 0xc9, | ||||
| 	0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28, | ||||
| 	0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x4f, 0xcd, | ||||
| 	0x2d, 0x28, 0xa9, 0x84, 0x90, 0x7a, 0x60, 0x39, 0x21, 0xfe, 0xf4, 0xfc, 0xfc, 0xf4, 0x9c, 0x54, | ||||
| 	0x3d, 0x98, 0x4a, 0x25, 0x76, 0x2e, 0x56, 0x57, 0x90, 0xbc, 0x53, 0x25, 0x97, 0x70, 0x72, 0x7e, | ||||
| 	0x3d, 0x98, 0x4a, 0x25, 0x76, 0x2e, 0x56, 0x57, 0x90, 0xbc, 0x53, 0x19, 0x97, 0x70, 0x72, 0x7e, | ||||
| 	0xae, 0x1e, 0x9a, 0xbc, 0x13, 0x17, 0x58, 0x36, 0x00, 0xc4, 0x0d, 0x60, 0x8c, 0x52, 0x27, 0xd2, | ||||
| 	0xce, 0x05, 0x8c, 0x8c, 0x3f, 0x18, 0x19, 0x17, 0x31, 0x31, 0xbb, 0x07, 0x38, 0xad, 0x62, 0x92, | ||||
| 	0x73, 0x87, 0x18, 0x1a, 0x00, 0x55, 0xaa, 0x17, 0x9e, 0x9a, 0x93, 0xe3, 0x9d, 0x97, 0x5f, 0x9e, | ||||
| 	0x17, 0x02, 0xd2, 0x92, 0xc4, 0x06, 0x36, 0xc3, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0x7f, 0xbb, | ||||
| 	0xf4, 0x0e, 0xd2, 0x00, 0x00, 0x00, | ||||
| 	0xce, 0x1f, 0x8c, 0x8c, 0x8b, 0x98, 0x98, 0xdd, 0x03, 0x9c, 0x56, 0x31, 0xc9, 0xb9, 0x43, 0x4c, | ||||
| 	0x0c, 0x80, 0xaa, 0xd3, 0x0b, 0x4f, 0xcd, 0xc9, 0xf1, 0xce, 0xcb, 0x2f, 0xcf, 0x0b, 0x01, 0xa9, | ||||
| 	0x4f, 0x62, 0x03, 0x1b, 0x60, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, 0x6e, 0x8e, 0x0a, 0x06, 0xcf, | ||||
| 	0x00, 0x00, 0x00, | ||||
| } | ||||
|   | ||||
							
								
								
									
										1
									
								
								vendor/github.com/golang/protobuf/ptypes/empty/empty.proto
									
									
									
										generated
									
									
										vendored
									
									
								
							
							
						
						
									
										1
									
								
								vendor/github.com/golang/protobuf/ptypes/empty/empty.proto
									
									
									
										generated
									
									
										vendored
									
									
								
							| @@ -37,7 +37,6 @@ option go_package = "github.com/golang/protobuf/ptypes/empty"; | ||||
| option java_package = "com.google.protobuf"; | ||||
| option java_outer_classname = "EmptyProto"; | ||||
| option java_multiple_files = true; | ||||
| option java_generate_equals_and_hash = true; | ||||
| option objc_class_prefix = "GPB"; | ||||
| option cc_enable_arenas = true; | ||||
|  | ||||
|   | ||||
							
								
								
									
										30
									
								
								vendor/golang.org/x/net/context/context.go
									
									
									
										generated
									
									
										vendored
									
									
								
							
							
						
						
									
										30
									
								
								vendor/golang.org/x/net/context/context.go
									
									
									
										generated
									
									
										vendored
									
									
								
							| @@ -7,7 +7,7 @@ | ||||
| // and between processes. | ||||
| // | ||||
| // Incoming requests to a server should create a Context, and outgoing calls to | ||||
| // servers should accept a Context.  The chain of function calls between must | ||||
| // servers should accept a Context. The chain of function calls between must | ||||
| // propagate the Context, optionally replacing it with a modified copy created | ||||
| // using WithDeadline, WithTimeout, WithCancel, or WithValue. | ||||
| // | ||||
| @@ -16,14 +16,14 @@ | ||||
| // propagation: | ||||
| // | ||||
| // Do not store Contexts inside a struct type; instead, pass a Context | ||||
| // explicitly to each function that needs it.  The Context should be the first | ||||
| // explicitly to each function that needs it. The Context should be the first | ||||
| // parameter, typically named ctx: | ||||
| // | ||||
| // 	func DoSomething(ctx context.Context, arg Arg) error { | ||||
| // 		// ... use ctx ... | ||||
| // 	} | ||||
| // | ||||
| // Do not pass a nil Context, even if a function permits it.  Pass context.TODO | ||||
| // Do not pass a nil Context, even if a function permits it. Pass context.TODO | ||||
| // if you are unsure about which Context to use. | ||||
| // | ||||
| // Use context Values only for request-scoped data that transits processes and | ||||
| @@ -44,13 +44,13 @@ import "time" | ||||
| // Context's methods may be called by multiple goroutines simultaneously. | ||||
| type Context interface { | ||||
| 	// Deadline returns the time when work done on behalf of this context | ||||
| 	// should be canceled.  Deadline returns ok==false when no deadline is | ||||
| 	// set.  Successive calls to Deadline return the same results. | ||||
| 	// should be canceled. Deadline returns ok==false when no deadline is | ||||
| 	// set. Successive calls to Deadline return the same results. | ||||
| 	Deadline() (deadline time.Time, ok bool) | ||||
|  | ||||
| 	// Done returns a channel that's closed when work done on behalf of this | ||||
| 	// context should be canceled.  Done may return nil if this context can | ||||
| 	// never be canceled.  Successive calls to Done return the same value. | ||||
| 	// context should be canceled. Done may return nil if this context can | ||||
| 	// never be canceled. Successive calls to Done return the same value. | ||||
| 	// | ||||
| 	// WithCancel arranges for Done to be closed when cancel is called; | ||||
| 	// WithDeadline arranges for Done to be closed when the deadline | ||||
| @@ -79,24 +79,24 @@ type Context interface { | ||||
| 	// a Done channel for cancelation. | ||||
| 	Done() <-chan struct{} | ||||
|  | ||||
| 	// Err returns a non-nil error value after Done is closed.  Err returns | ||||
| 	// Err returns a non-nil error value after Done is closed. Err returns | ||||
| 	// Canceled if the context was canceled or DeadlineExceeded if the | ||||
| 	// context's deadline passed.  No other values for Err are defined. | ||||
| 	// context's deadline passed. No other values for Err are defined. | ||||
| 	// After Done is closed, successive calls to Err return the same value. | ||||
| 	Err() error | ||||
|  | ||||
| 	// Value returns the value associated with this context for key, or nil | ||||
| 	// if no value is associated with key.  Successive calls to Value with | ||||
| 	// if no value is associated with key. Successive calls to Value with | ||||
| 	// the same key returns the same result. | ||||
| 	// | ||||
| 	// Use context values only for request-scoped data that transits | ||||
| 	// processes and API boundaries, not for passing optional parameters to | ||||
| 	// functions. | ||||
| 	// | ||||
| 	// A key identifies a specific value in a Context.  Functions that wish | ||||
| 	// A key identifies a specific value in a Context. Functions that wish | ||||
| 	// to store values in Context typically allocate a key in a global | ||||
| 	// variable then use that key as the argument to context.WithValue and | ||||
| 	// Context.Value.  A key can be any type that supports equality; | ||||
| 	// Context.Value. A key can be any type that supports equality; | ||||
| 	// packages should define keys as an unexported type to avoid | ||||
| 	// collisions. | ||||
| 	// | ||||
| @@ -115,7 +115,7 @@ type Context interface { | ||||
| 	// 	// This prevents collisions with keys defined in other packages. | ||||
| 	// 	type key int | ||||
| 	// | ||||
| 	// 	// userKey is the key for user.User values in Contexts.  It is | ||||
| 	// 	// userKey is the key for user.User values in Contexts. It is | ||||
| 	// 	// unexported; clients use user.NewContext and user.FromContext | ||||
| 	// 	// instead of using this key directly. | ||||
| 	// 	var userKey key = 0 | ||||
| @@ -134,14 +134,14 @@ type Context interface { | ||||
| } | ||||
|  | ||||
| // Background returns a non-nil, empty Context. It is never canceled, has no | ||||
| // values, and has no deadline.  It is typically used by the main function, | ||||
| // values, and has no deadline. It is typically used by the main function, | ||||
| // initialization, and tests, and as the top-level Context for incoming | ||||
| // requests. | ||||
| func Background() Context { | ||||
| 	return background | ||||
| } | ||||
|  | ||||
| // TODO returns a non-nil, empty Context.  Code should use context.TODO when | ||||
| // TODO returns a non-nil, empty Context. Code should use context.TODO when | ||||
| // it's unclear which Context to use or it is not yet available (because the | ||||
| // surrounding function has not yet been extended to accept a Context | ||||
| // parameter).  TODO is recognized by static analysis tools that determine | ||||
|   | ||||
							
								
								
									
										4
									
								
								vendor/golang.org/x/net/context/go17.go
									
									
									
										generated
									
									
										vendored
									
									
								
							
							
						
						
									
										4
									
								
								vendor/golang.org/x/net/context/go17.go
									
									
									
										generated
									
									
										vendored
									
									
								
							| @@ -35,8 +35,8 @@ func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { | ||||
| } | ||||
|  | ||||
| // WithDeadline returns a copy of the parent context with the deadline adjusted | ||||
| // to be no later than d.  If the parent's deadline is already earlier than d, | ||||
| // WithDeadline(parent, d) is semantically equivalent to parent.  The returned | ||||
| // to be no later than d. If the parent's deadline is already earlier than d, | ||||
| // WithDeadline(parent, d) is semantically equivalent to parent. The returned | ||||
| // context's Done channel is closed when the deadline expires, when the returned | ||||
| // cancel function is called, or when the parent context's Done channel is | ||||
| // closed, whichever happens first. | ||||
|   | ||||
							
								
								
									
										18
									
								
								vendor/golang.org/x/net/context/pre_go17.go
									
									
									
										generated
									
									
										vendored
									
									
								
							
							
						
						
									
										18
									
								
								vendor/golang.org/x/net/context/pre_go17.go
									
									
									
										generated
									
									
										vendored
									
									
								
							| @@ -13,7 +13,7 @@ import ( | ||||
| 	"time" | ||||
| ) | ||||
|  | ||||
| // An emptyCtx is never canceled, has no values, and has no deadline.  It is not | ||||
| // An emptyCtx is never canceled, has no values, and has no deadline. It is not | ||||
| // struct{}, since vars of this type must have distinct addresses. | ||||
| type emptyCtx int | ||||
|  | ||||
| @@ -104,7 +104,7 @@ func propagateCancel(parent Context, child canceler) { | ||||
| } | ||||
|  | ||||
| // parentCancelCtx follows a chain of parent references until it finds a | ||||
| // *cancelCtx.  This function understands how each of the concrete types in this | ||||
| // *cancelCtx. This function understands how each of the concrete types in this | ||||
| // package represents its parent. | ||||
| func parentCancelCtx(parent Context) (*cancelCtx, bool) { | ||||
| 	for { | ||||
| @@ -134,14 +134,14 @@ func removeChild(parent Context, child canceler) { | ||||
| 	p.mu.Unlock() | ||||
| } | ||||
|  | ||||
| // A canceler is a context type that can be canceled directly.  The | ||||
| // A canceler is a context type that can be canceled directly. The | ||||
| // implementations are *cancelCtx and *timerCtx. | ||||
| type canceler interface { | ||||
| 	cancel(removeFromParent bool, err error) | ||||
| 	Done() <-chan struct{} | ||||
| } | ||||
|  | ||||
| // A cancelCtx can be canceled.  When canceled, it also cancels any children | ||||
| // A cancelCtx can be canceled. When canceled, it also cancels any children | ||||
| // that implement canceler. | ||||
| type cancelCtx struct { | ||||
| 	Context | ||||
| @@ -193,8 +193,8 @@ func (c *cancelCtx) cancel(removeFromParent bool, err error) { | ||||
| } | ||||
|  | ||||
| // WithDeadline returns a copy of the parent context with the deadline adjusted | ||||
| // to be no later than d.  If the parent's deadline is already earlier than d, | ||||
| // WithDeadline(parent, d) is semantically equivalent to parent.  The returned | ||||
| // to be no later than d. If the parent's deadline is already earlier than d, | ||||
| // WithDeadline(parent, d) is semantically equivalent to parent. The returned | ||||
| // context's Done channel is closed when the deadline expires, when the returned | ||||
| // cancel function is called, or when the parent context's Done channel is | ||||
| // closed, whichever happens first. | ||||
| @@ -226,8 +226,8 @@ func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { | ||||
| 	return c, func() { c.cancel(true, Canceled) } | ||||
| } | ||||
|  | ||||
| // A timerCtx carries a timer and a deadline.  It embeds a cancelCtx to | ||||
| // implement Done and Err.  It implements cancel by stopping its timer then | ||||
| // A timerCtx carries a timer and a deadline. It embeds a cancelCtx to | ||||
| // implement Done and Err. It implements cancel by stopping its timer then | ||||
| // delegating to cancelCtx.cancel. | ||||
| type timerCtx struct { | ||||
| 	*cancelCtx | ||||
| @@ -281,7 +281,7 @@ func WithValue(parent Context, key interface{}, val interface{}) Context { | ||||
| 	return &valueCtx{parent, key, val} | ||||
| } | ||||
|  | ||||
| // A valueCtx carries a key-value pair.  It implements Value for that key and | ||||
| // A valueCtx carries a key-value pair. It implements Value for that key and | ||||
| // delegates all other calls to the embedded Context. | ||||
| type valueCtx struct { | ||||
| 	Context | ||||
|   | ||||
							
								
								
									
										641
									
								
								vendor/golang.org/x/net/http2/ciphers.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										641
									
								
								vendor/golang.org/x/net/http2/ciphers.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,641 @@ | ||||
| // Copyright 2017 The Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| package http2 | ||||
|  | ||||
| // A list of the possible cipher suite ids. Taken from | ||||
| // http://www.iana.org/assignments/tls-parameters/tls-parameters.txt | ||||
|  | ||||
| const ( | ||||
| 	cipher_TLS_NULL_WITH_NULL_NULL               uint16 = 0x0000 | ||||
| 	cipher_TLS_RSA_WITH_NULL_MD5                 uint16 = 0x0001 | ||||
| 	cipher_TLS_RSA_WITH_NULL_SHA                 uint16 = 0x0002 | ||||
| 	cipher_TLS_RSA_EXPORT_WITH_RC4_40_MD5        uint16 = 0x0003 | ||||
| 	cipher_TLS_RSA_WITH_RC4_128_MD5              uint16 = 0x0004 | ||||
| 	cipher_TLS_RSA_WITH_RC4_128_SHA              uint16 = 0x0005 | ||||
| 	cipher_TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5    uint16 = 0x0006 | ||||
| 	cipher_TLS_RSA_WITH_IDEA_CBC_SHA             uint16 = 0x0007 | ||||
| 	cipher_TLS_RSA_EXPORT_WITH_DES40_CBC_SHA     uint16 = 0x0008 | ||||
| 	cipher_TLS_RSA_WITH_DES_CBC_SHA              uint16 = 0x0009 | ||||
| 	cipher_TLS_RSA_WITH_3DES_EDE_CBC_SHA         uint16 = 0x000A | ||||
| 	cipher_TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA  uint16 = 0x000B | ||||
| 	cipher_TLS_DH_DSS_WITH_DES_CBC_SHA           uint16 = 0x000C | ||||
| 	cipher_TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA      uint16 = 0x000D | ||||
| 	cipher_TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA  uint16 = 0x000E | ||||
| 	cipher_TLS_DH_RSA_WITH_DES_CBC_SHA           uint16 = 0x000F | ||||
| 	cipher_TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA      uint16 = 0x0010 | ||||
| 	cipher_TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0011 | ||||
| 	cipher_TLS_DHE_DSS_WITH_DES_CBC_SHA          uint16 = 0x0012 | ||||
| 	cipher_TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA     uint16 = 0x0013 | ||||
| 	cipher_TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0014 | ||||
| 	cipher_TLS_DHE_RSA_WITH_DES_CBC_SHA          uint16 = 0x0015 | ||||
| 	cipher_TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA     uint16 = 0x0016 | ||||
| 	cipher_TLS_DH_anon_EXPORT_WITH_RC4_40_MD5    uint16 = 0x0017 | ||||
| 	cipher_TLS_DH_anon_WITH_RC4_128_MD5          uint16 = 0x0018 | ||||
| 	cipher_TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0019 | ||||
| 	cipher_TLS_DH_anon_WITH_DES_CBC_SHA          uint16 = 0x001A | ||||
| 	cipher_TLS_DH_anon_WITH_3DES_EDE_CBC_SHA     uint16 = 0x001B | ||||
| 	// Reserved uint16 =  0x001C-1D | ||||
| 	cipher_TLS_KRB5_WITH_DES_CBC_SHA             uint16 = 0x001E | ||||
| 	cipher_TLS_KRB5_WITH_3DES_EDE_CBC_SHA        uint16 = 0x001F | ||||
| 	cipher_TLS_KRB5_WITH_RC4_128_SHA             uint16 = 0x0020 | ||||
| 	cipher_TLS_KRB5_WITH_IDEA_CBC_SHA            uint16 = 0x0021 | ||||
| 	cipher_TLS_KRB5_WITH_DES_CBC_MD5             uint16 = 0x0022 | ||||
| 	cipher_TLS_KRB5_WITH_3DES_EDE_CBC_MD5        uint16 = 0x0023 | ||||
| 	cipher_TLS_KRB5_WITH_RC4_128_MD5             uint16 = 0x0024 | ||||
| 	cipher_TLS_KRB5_WITH_IDEA_CBC_MD5            uint16 = 0x0025 | ||||
| 	cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA   uint16 = 0x0026 | ||||
| 	cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA   uint16 = 0x0027 | ||||
| 	cipher_TLS_KRB5_EXPORT_WITH_RC4_40_SHA       uint16 = 0x0028 | ||||
| 	cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5   uint16 = 0x0029 | ||||
| 	cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5   uint16 = 0x002A | ||||
| 	cipher_TLS_KRB5_EXPORT_WITH_RC4_40_MD5       uint16 = 0x002B | ||||
| 	cipher_TLS_PSK_WITH_NULL_SHA                 uint16 = 0x002C | ||||
| 	cipher_TLS_DHE_PSK_WITH_NULL_SHA             uint16 = 0x002D | ||||
| 	cipher_TLS_RSA_PSK_WITH_NULL_SHA             uint16 = 0x002E | ||||
| 	cipher_TLS_RSA_WITH_AES_128_CBC_SHA          uint16 = 0x002F | ||||
| 	cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA       uint16 = 0x0030 | ||||
| 	cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA       uint16 = 0x0031 | ||||
| 	cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA      uint16 = 0x0032 | ||||
| 	cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA      uint16 = 0x0033 | ||||
| 	cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA      uint16 = 0x0034 | ||||
| 	cipher_TLS_RSA_WITH_AES_256_CBC_SHA          uint16 = 0x0035 | ||||
| 	cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA       uint16 = 0x0036 | ||||
| 	cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA       uint16 = 0x0037 | ||||
| 	cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA      uint16 = 0x0038 | ||||
| 	cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA      uint16 = 0x0039 | ||||
| 	cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA      uint16 = 0x003A | ||||
| 	cipher_TLS_RSA_WITH_NULL_SHA256              uint16 = 0x003B | ||||
| 	cipher_TLS_RSA_WITH_AES_128_CBC_SHA256       uint16 = 0x003C | ||||
| 	cipher_TLS_RSA_WITH_AES_256_CBC_SHA256       uint16 = 0x003D | ||||
| 	cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA256    uint16 = 0x003E | ||||
| 	cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA256    uint16 = 0x003F | ||||
| 	cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA256   uint16 = 0x0040 | ||||
| 	cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA     uint16 = 0x0041 | ||||
| 	cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA  uint16 = 0x0042 | ||||
| 	cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA  uint16 = 0x0043 | ||||
| 	cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0044 | ||||
| 	cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0045 | ||||
| 	cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0046 | ||||
| 	// Reserved uint16 =  0x0047-4F | ||||
| 	// Reserved uint16 =  0x0050-58 | ||||
| 	// Reserved uint16 =  0x0059-5C | ||||
| 	// Unassigned uint16 =  0x005D-5F | ||||
| 	// Reserved uint16 =  0x0060-66 | ||||
| 	cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x0067 | ||||
| 	cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA256  uint16 = 0x0068 | ||||
| 	cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA256  uint16 = 0x0069 | ||||
| 	cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA256 uint16 = 0x006A | ||||
| 	cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x006B | ||||
| 	cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA256 uint16 = 0x006C | ||||
| 	cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA256 uint16 = 0x006D | ||||
| 	// Unassigned uint16 =  0x006E-83 | ||||
| 	cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA        uint16 = 0x0084 | ||||
| 	cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA     uint16 = 0x0085 | ||||
| 	cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA     uint16 = 0x0086 | ||||
| 	cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA    uint16 = 0x0087 | ||||
| 	cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA    uint16 = 0x0088 | ||||
| 	cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA    uint16 = 0x0089 | ||||
| 	cipher_TLS_PSK_WITH_RC4_128_SHA                 uint16 = 0x008A | ||||
| 	cipher_TLS_PSK_WITH_3DES_EDE_CBC_SHA            uint16 = 0x008B | ||||
| 	cipher_TLS_PSK_WITH_AES_128_CBC_SHA             uint16 = 0x008C | ||||
| 	cipher_TLS_PSK_WITH_AES_256_CBC_SHA             uint16 = 0x008D | ||||
| 	cipher_TLS_DHE_PSK_WITH_RC4_128_SHA             uint16 = 0x008E | ||||
| 	cipher_TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA        uint16 = 0x008F | ||||
| 	cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA         uint16 = 0x0090 | ||||
| 	cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA         uint16 = 0x0091 | ||||
| 	cipher_TLS_RSA_PSK_WITH_RC4_128_SHA             uint16 = 0x0092 | ||||
| 	cipher_TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA        uint16 = 0x0093 | ||||
| 	cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA         uint16 = 0x0094 | ||||
| 	cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA         uint16 = 0x0095 | ||||
| 	cipher_TLS_RSA_WITH_SEED_CBC_SHA                uint16 = 0x0096 | ||||
| 	cipher_TLS_DH_DSS_WITH_SEED_CBC_SHA             uint16 = 0x0097 | ||||
| 	cipher_TLS_DH_RSA_WITH_SEED_CBC_SHA             uint16 = 0x0098 | ||||
| 	cipher_TLS_DHE_DSS_WITH_SEED_CBC_SHA            uint16 = 0x0099 | ||||
| 	cipher_TLS_DHE_RSA_WITH_SEED_CBC_SHA            uint16 = 0x009A | ||||
| 	cipher_TLS_DH_anon_WITH_SEED_CBC_SHA            uint16 = 0x009B | ||||
| 	cipher_TLS_RSA_WITH_AES_128_GCM_SHA256          uint16 = 0x009C | ||||
| 	cipher_TLS_RSA_WITH_AES_256_GCM_SHA384          uint16 = 0x009D | ||||
| 	cipher_TLS_DHE_RSA_WITH_AES_128_GCM_SHA256      uint16 = 0x009E | ||||
| 	cipher_TLS_DHE_RSA_WITH_AES_256_GCM_SHA384      uint16 = 0x009F | ||||
| 	cipher_TLS_DH_RSA_WITH_AES_128_GCM_SHA256       uint16 = 0x00A0 | ||||
| 	cipher_TLS_DH_RSA_WITH_AES_256_GCM_SHA384       uint16 = 0x00A1 | ||||
| 	cipher_TLS_DHE_DSS_WITH_AES_128_GCM_SHA256      uint16 = 0x00A2 | ||||
| 	cipher_TLS_DHE_DSS_WITH_AES_256_GCM_SHA384      uint16 = 0x00A3 | ||||
| 	cipher_TLS_DH_DSS_WITH_AES_128_GCM_SHA256       uint16 = 0x00A4 | ||||
| 	cipher_TLS_DH_DSS_WITH_AES_256_GCM_SHA384       uint16 = 0x00A5 | ||||
| 	cipher_TLS_DH_anon_WITH_AES_128_GCM_SHA256      uint16 = 0x00A6 | ||||
| 	cipher_TLS_DH_anon_WITH_AES_256_GCM_SHA384      uint16 = 0x00A7 | ||||
| 	cipher_TLS_PSK_WITH_AES_128_GCM_SHA256          uint16 = 0x00A8 | ||||
| 	cipher_TLS_PSK_WITH_AES_256_GCM_SHA384          uint16 = 0x00A9 | ||||
| 	cipher_TLS_DHE_PSK_WITH_AES_128_GCM_SHA256      uint16 = 0x00AA | ||||
| 	cipher_TLS_DHE_PSK_WITH_AES_256_GCM_SHA384      uint16 = 0x00AB | ||||
| 	cipher_TLS_RSA_PSK_WITH_AES_128_GCM_SHA256      uint16 = 0x00AC | ||||
| 	cipher_TLS_RSA_PSK_WITH_AES_256_GCM_SHA384      uint16 = 0x00AD | ||||
| 	cipher_TLS_PSK_WITH_AES_128_CBC_SHA256          uint16 = 0x00AE | ||||
| 	cipher_TLS_PSK_WITH_AES_256_CBC_SHA384          uint16 = 0x00AF | ||||
| 	cipher_TLS_PSK_WITH_NULL_SHA256                 uint16 = 0x00B0 | ||||
| 	cipher_TLS_PSK_WITH_NULL_SHA384                 uint16 = 0x00B1 | ||||
| 	cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA256      uint16 = 0x00B2 | ||||
| 	cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA384      uint16 = 0x00B3 | ||||
| 	cipher_TLS_DHE_PSK_WITH_NULL_SHA256             uint16 = 0x00B4 | ||||
| 	cipher_TLS_DHE_PSK_WITH_NULL_SHA384             uint16 = 0x00B5 | ||||
| 	cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA256      uint16 = 0x00B6 | ||||
| 	cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA384      uint16 = 0x00B7 | ||||
| 	cipher_TLS_RSA_PSK_WITH_NULL_SHA256             uint16 = 0x00B8 | ||||
| 	cipher_TLS_RSA_PSK_WITH_NULL_SHA384             uint16 = 0x00B9 | ||||
| 	cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256     uint16 = 0x00BA | ||||
| 	cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256  uint16 = 0x00BB | ||||
| 	cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256  uint16 = 0x00BC | ||||
| 	cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BD | ||||
| 	cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BE | ||||
| 	cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BF | ||||
| 	cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256     uint16 = 0x00C0 | ||||
| 	cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256  uint16 = 0x00C1 | ||||
| 	cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256  uint16 = 0x00C2 | ||||
| 	cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C3 | ||||
| 	cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C4 | ||||
| 	cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C5 | ||||
| 	// Unassigned uint16 =  0x00C6-FE | ||||
| 	cipher_TLS_EMPTY_RENEGOTIATION_INFO_SCSV uint16 = 0x00FF | ||||
| 	// Unassigned uint16 =  0x01-55,* | ||||
| 	cipher_TLS_FALLBACK_SCSV uint16 = 0x5600 | ||||
| 	// Unassigned                                   uint16 = 0x5601 - 0xC000 | ||||
| 	cipher_TLS_ECDH_ECDSA_WITH_NULL_SHA                 uint16 = 0xC001 | ||||
| 	cipher_TLS_ECDH_ECDSA_WITH_RC4_128_SHA              uint16 = 0xC002 | ||||
| 	cipher_TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA         uint16 = 0xC003 | ||||
| 	cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA          uint16 = 0xC004 | ||||
| 	cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA          uint16 = 0xC005 | ||||
| 	cipher_TLS_ECDHE_ECDSA_WITH_NULL_SHA                uint16 = 0xC006 | ||||
| 	cipher_TLS_ECDHE_ECDSA_WITH_RC4_128_SHA             uint16 = 0xC007 | ||||
| 	cipher_TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA        uint16 = 0xC008 | ||||
| 	cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA         uint16 = 0xC009 | ||||
| 	cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA         uint16 = 0xC00A | ||||
| 	cipher_TLS_ECDH_RSA_WITH_NULL_SHA                   uint16 = 0xC00B | ||||
| 	cipher_TLS_ECDH_RSA_WITH_RC4_128_SHA                uint16 = 0xC00C | ||||
| 	cipher_TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA           uint16 = 0xC00D | ||||
| 	cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA            uint16 = 0xC00E | ||||
| 	cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA            uint16 = 0xC00F | ||||
| 	cipher_TLS_ECDHE_RSA_WITH_NULL_SHA                  uint16 = 0xC010 | ||||
| 	cipher_TLS_ECDHE_RSA_WITH_RC4_128_SHA               uint16 = 0xC011 | ||||
| 	cipher_TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA          uint16 = 0xC012 | ||||
| 	cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA           uint16 = 0xC013 | ||||
| 	cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA           uint16 = 0xC014 | ||||
| 	cipher_TLS_ECDH_anon_WITH_NULL_SHA                  uint16 = 0xC015 | ||||
| 	cipher_TLS_ECDH_anon_WITH_RC4_128_SHA               uint16 = 0xC016 | ||||
| 	cipher_TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA          uint16 = 0xC017 | ||||
| 	cipher_TLS_ECDH_anon_WITH_AES_128_CBC_SHA           uint16 = 0xC018 | ||||
| 	cipher_TLS_ECDH_anon_WITH_AES_256_CBC_SHA           uint16 = 0xC019 | ||||
| 	cipher_TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA            uint16 = 0xC01A | ||||
| 	cipher_TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA        uint16 = 0xC01B | ||||
| 	cipher_TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA        uint16 = 0xC01C | ||||
| 	cipher_TLS_SRP_SHA_WITH_AES_128_CBC_SHA             uint16 = 0xC01D | ||||
| 	cipher_TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA         uint16 = 0xC01E | ||||
| 	cipher_TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA         uint16 = 0xC01F | ||||
| 	cipher_TLS_SRP_SHA_WITH_AES_256_CBC_SHA             uint16 = 0xC020 | ||||
| 	cipher_TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA         uint16 = 0xC021 | ||||
| 	cipher_TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA         uint16 = 0xC022 | ||||
| 	cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256      uint16 = 0xC023 | ||||
| 	cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384      uint16 = 0xC024 | ||||
| 	cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256       uint16 = 0xC025 | ||||
| 	cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384       uint16 = 0xC026 | ||||
| 	cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256        uint16 = 0xC027 | ||||
| 	cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384        uint16 = 0xC028 | ||||
| 	cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256         uint16 = 0xC029 | ||||
| 	cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384         uint16 = 0xC02A | ||||
| 	cipher_TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256      uint16 = 0xC02B | ||||
| 	cipher_TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384      uint16 = 0xC02C | ||||
| 	cipher_TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256       uint16 = 0xC02D | ||||
| 	cipher_TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384       uint16 = 0xC02E | ||||
| 	cipher_TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256        uint16 = 0xC02F | ||||
| 	cipher_TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384        uint16 = 0xC030 | ||||
| 	cipher_TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256         uint16 = 0xC031 | ||||
| 	cipher_TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384         uint16 = 0xC032 | ||||
| 	cipher_TLS_ECDHE_PSK_WITH_RC4_128_SHA               uint16 = 0xC033 | ||||
| 	cipher_TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA          uint16 = 0xC034 | ||||
| 	cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA           uint16 = 0xC035 | ||||
| 	cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA           uint16 = 0xC036 | ||||
| 	cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256        uint16 = 0xC037 | ||||
| 	cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384        uint16 = 0xC038 | ||||
| 	cipher_TLS_ECDHE_PSK_WITH_NULL_SHA                  uint16 = 0xC039 | ||||
| 	cipher_TLS_ECDHE_PSK_WITH_NULL_SHA256               uint16 = 0xC03A | ||||
| 	cipher_TLS_ECDHE_PSK_WITH_NULL_SHA384               uint16 = 0xC03B | ||||
| 	cipher_TLS_RSA_WITH_ARIA_128_CBC_SHA256             uint16 = 0xC03C | ||||
| 	cipher_TLS_RSA_WITH_ARIA_256_CBC_SHA384             uint16 = 0xC03D | ||||
| 	cipher_TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256          uint16 = 0xC03E | ||||
| 	cipher_TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384          uint16 = 0xC03F | ||||
| 	cipher_TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256          uint16 = 0xC040 | ||||
| 	cipher_TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384          uint16 = 0xC041 | ||||
| 	cipher_TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256         uint16 = 0xC042 | ||||
| 	cipher_TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384         uint16 = 0xC043 | ||||
| 	cipher_TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256         uint16 = 0xC044 | ||||
| 	cipher_TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384         uint16 = 0xC045 | ||||
| 	cipher_TLS_DH_anon_WITH_ARIA_128_CBC_SHA256         uint16 = 0xC046 | ||||
| 	cipher_TLS_DH_anon_WITH_ARIA_256_CBC_SHA384         uint16 = 0xC047 | ||||
| 	cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256     uint16 = 0xC048 | ||||
| 	cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384     uint16 = 0xC049 | ||||
| 	cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256      uint16 = 0xC04A | ||||
| 	cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384      uint16 = 0xC04B | ||||
| 	cipher_TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256       uint16 = 0xC04C | ||||
| 	cipher_TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384       uint16 = 0xC04D | ||||
| 	cipher_TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256        uint16 = 0xC04E | ||||
| 	cipher_TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384        uint16 = 0xC04F | ||||
| 	cipher_TLS_RSA_WITH_ARIA_128_GCM_SHA256             uint16 = 0xC050 | ||||
| 	cipher_TLS_RSA_WITH_ARIA_256_GCM_SHA384             uint16 = 0xC051 | ||||
| 	cipher_TLS_DHE_RSA_WITH_ARIA_128_GCM_SHA256         uint16 = 0xC052 | ||||
| 	cipher_TLS_DHE_RSA_WITH_ARIA_256_GCM_SHA384         uint16 = 0xC053 | ||||
| 	cipher_TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256          uint16 = 0xC054 | ||||
| 	cipher_TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384          uint16 = 0xC055 | ||||
| 	cipher_TLS_DHE_DSS_WITH_ARIA_128_GCM_SHA256         uint16 = 0xC056 | ||||
| 	cipher_TLS_DHE_DSS_WITH_ARIA_256_GCM_SHA384         uint16 = 0xC057 | ||||
| 	cipher_TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256          uint16 = 0xC058 | ||||
| 	cipher_TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384          uint16 = 0xC059 | ||||
| 	cipher_TLS_DH_anon_WITH_ARIA_128_GCM_SHA256         uint16 = 0xC05A | ||||
| 	cipher_TLS_DH_anon_WITH_ARIA_256_GCM_SHA384         uint16 = 0xC05B | ||||
| 	cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_GCM_SHA256     uint16 = 0xC05C | ||||
| 	cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_GCM_SHA384     uint16 = 0xC05D | ||||
| 	cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256      uint16 = 0xC05E | ||||
| 	cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384      uint16 = 0xC05F | ||||
| 	cipher_TLS_ECDHE_RSA_WITH_ARIA_128_GCM_SHA256       uint16 = 0xC060 | ||||
| 	cipher_TLS_ECDHE_RSA_WITH_ARIA_256_GCM_SHA384       uint16 = 0xC061 | ||||
| 	cipher_TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256        uint16 = 0xC062 | ||||
| 	cipher_TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384        uint16 = 0xC063 | ||||
| 	cipher_TLS_PSK_WITH_ARIA_128_CBC_SHA256             uint16 = 0xC064 | ||||
| 	cipher_TLS_PSK_WITH_ARIA_256_CBC_SHA384             uint16 = 0xC065 | ||||
| 	cipher_TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256         uint16 = 0xC066 | ||||
| 	cipher_TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384         uint16 = 0xC067 | ||||
| 	cipher_TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256         uint16 = 0xC068 | ||||
| 	cipher_TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384         uint16 = 0xC069 | ||||
| 	cipher_TLS_PSK_WITH_ARIA_128_GCM_SHA256             uint16 = 0xC06A | ||||
| 	cipher_TLS_PSK_WITH_ARIA_256_GCM_SHA384             uint16 = 0xC06B | ||||
| 	cipher_TLS_DHE_PSK_WITH_ARIA_128_GCM_SHA256         uint16 = 0xC06C | ||||
| 	cipher_TLS_DHE_PSK_WITH_ARIA_256_GCM_SHA384         uint16 = 0xC06D | ||||
| 	cipher_TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256         uint16 = 0xC06E | ||||
| 	cipher_TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384         uint16 = 0xC06F | ||||
| 	cipher_TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256       uint16 = 0xC070 | ||||
| 	cipher_TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384       uint16 = 0xC071 | ||||
| 	cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC072 | ||||
| 	cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC073 | ||||
| 	cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256  uint16 = 0xC074 | ||||
| 	cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384  uint16 = 0xC075 | ||||
| 	cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256   uint16 = 0xC076 | ||||
| 	cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384   uint16 = 0xC077 | ||||
| 	cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256    uint16 = 0xC078 | ||||
| 	cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384    uint16 = 0xC079 | ||||
| 	cipher_TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256         uint16 = 0xC07A | ||||
| 	cipher_TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384         uint16 = 0xC07B | ||||
| 	cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_GCM_SHA256     uint16 = 0xC07C | ||||
| 	cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_GCM_SHA384     uint16 = 0xC07D | ||||
| 	cipher_TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256      uint16 = 0xC07E | ||||
| 	cipher_TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384      uint16 = 0xC07F | ||||
| 	cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_GCM_SHA256     uint16 = 0xC080 | ||||
| 	cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_GCM_SHA384     uint16 = 0xC081 | ||||
| 	cipher_TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256      uint16 = 0xC082 | ||||
| 	cipher_TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384      uint16 = 0xC083 | ||||
| 	cipher_TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256     uint16 = 0xC084 | ||||
| 	cipher_TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384     uint16 = 0xC085 | ||||
| 	cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC086 | ||||
| 	cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC087 | ||||
| 	cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256  uint16 = 0xC088 | ||||
| 	cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384  uint16 = 0xC089 | ||||
| 	cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_GCM_SHA256   uint16 = 0xC08A | ||||
| 	cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_GCM_SHA384   uint16 = 0xC08B | ||||
| 	cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256    uint16 = 0xC08C | ||||
| 	cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384    uint16 = 0xC08D | ||||
| 	cipher_TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256         uint16 = 0xC08E | ||||
| 	cipher_TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384         uint16 = 0xC08F | ||||
| 	cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_GCM_SHA256     uint16 = 0xC090 | ||||
| 	cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_GCM_SHA384     uint16 = 0xC091 | ||||
| 	cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256     uint16 = 0xC092 | ||||
| 	cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384     uint16 = 0xC093 | ||||
| 	cipher_TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256         uint16 = 0xC094 | ||||
| 	cipher_TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384         uint16 = 0xC095 | ||||
| 	cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256     uint16 = 0xC096 | ||||
| 	cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384     uint16 = 0xC097 | ||||
| 	cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256     uint16 = 0xC098 | ||||
| 	cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384     uint16 = 0xC099 | ||||
| 	cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256   uint16 = 0xC09A | ||||
| 	cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384   uint16 = 0xC09B | ||||
| 	cipher_TLS_RSA_WITH_AES_128_CCM                     uint16 = 0xC09C | ||||
| 	cipher_TLS_RSA_WITH_AES_256_CCM                     uint16 = 0xC09D | ||||
| 	cipher_TLS_DHE_RSA_WITH_AES_128_CCM                 uint16 = 0xC09E | ||||
| 	cipher_TLS_DHE_RSA_WITH_AES_256_CCM                 uint16 = 0xC09F | ||||
| 	cipher_TLS_RSA_WITH_AES_128_CCM_8                   uint16 = 0xC0A0 | ||||
| 	cipher_TLS_RSA_WITH_AES_256_CCM_8                   uint16 = 0xC0A1 | ||||
| 	cipher_TLS_DHE_RSA_WITH_AES_128_CCM_8               uint16 = 0xC0A2 | ||||
| 	cipher_TLS_DHE_RSA_WITH_AES_256_CCM_8               uint16 = 0xC0A3 | ||||
| 	cipher_TLS_PSK_WITH_AES_128_CCM                     uint16 = 0xC0A4 | ||||
| 	cipher_TLS_PSK_WITH_AES_256_CCM                     uint16 = 0xC0A5 | ||||
| 	cipher_TLS_DHE_PSK_WITH_AES_128_CCM                 uint16 = 0xC0A6 | ||||
| 	cipher_TLS_DHE_PSK_WITH_AES_256_CCM                 uint16 = 0xC0A7 | ||||
| 	cipher_TLS_PSK_WITH_AES_128_CCM_8                   uint16 = 0xC0A8 | ||||
| 	cipher_TLS_PSK_WITH_AES_256_CCM_8                   uint16 = 0xC0A9 | ||||
| 	cipher_TLS_PSK_DHE_WITH_AES_128_CCM_8               uint16 = 0xC0AA | ||||
| 	cipher_TLS_PSK_DHE_WITH_AES_256_CCM_8               uint16 = 0xC0AB | ||||
| 	cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CCM             uint16 = 0xC0AC | ||||
| 	cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CCM             uint16 = 0xC0AD | ||||
| 	cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CCM_8           uint16 = 0xC0AE | ||||
| 	cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CCM_8           uint16 = 0xC0AF | ||||
| 	// Unassigned uint16 =  0xC0B0-FF | ||||
| 	// Unassigned uint16 =  0xC1-CB,* | ||||
| 	// Unassigned uint16 =  0xCC00-A7 | ||||
| 	cipher_TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256   uint16 = 0xCCA8 | ||||
| 	cipher_TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCA9 | ||||
| 	cipher_TLS_DHE_RSA_WITH_CHACHA20_POLY1305_SHA256     uint16 = 0xCCAA | ||||
| 	cipher_TLS_PSK_WITH_CHACHA20_POLY1305_SHA256         uint16 = 0xCCAB | ||||
| 	cipher_TLS_ECDHE_PSK_WITH_CHACHA20_POLY1305_SHA256   uint16 = 0xCCAC | ||||
| 	cipher_TLS_DHE_PSK_WITH_CHACHA20_POLY1305_SHA256     uint16 = 0xCCAD | ||||
| 	cipher_TLS_RSA_PSK_WITH_CHACHA20_POLY1305_SHA256     uint16 = 0xCCAE | ||||
| ) | ||||
|  | ||||
| // isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec. | ||||
| // References: | ||||
| // https://tools.ietf.org/html/rfc7540#appendix-A | ||||
| // Reject cipher suites from Appendix A. | ||||
| // "This list includes those cipher suites that do not | ||||
| // offer an ephemeral key exchange and those that are | ||||
| // based on the TLS null, stream or block cipher type" | ||||
| func isBadCipher(cipher uint16) bool { | ||||
| 	switch cipher { | ||||
| 	case cipher_TLS_NULL_WITH_NULL_NULL, | ||||
| 		cipher_TLS_RSA_WITH_NULL_MD5, | ||||
| 		cipher_TLS_RSA_WITH_NULL_SHA, | ||||
| 		cipher_TLS_RSA_EXPORT_WITH_RC4_40_MD5, | ||||
| 		cipher_TLS_RSA_WITH_RC4_128_MD5, | ||||
| 		cipher_TLS_RSA_WITH_RC4_128_SHA, | ||||
| 		cipher_TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5, | ||||
| 		cipher_TLS_RSA_WITH_IDEA_CBC_SHA, | ||||
| 		cipher_TLS_RSA_EXPORT_WITH_DES40_CBC_SHA, | ||||
| 		cipher_TLS_RSA_WITH_DES_CBC_SHA, | ||||
| 		cipher_TLS_RSA_WITH_3DES_EDE_CBC_SHA, | ||||
| 		cipher_TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA, | ||||
| 		cipher_TLS_DH_DSS_WITH_DES_CBC_SHA, | ||||
| 		cipher_TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA, | ||||
| 		cipher_TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA, | ||||
| 		cipher_TLS_DH_RSA_WITH_DES_CBC_SHA, | ||||
| 		cipher_TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA, | ||||
| 		cipher_TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA, | ||||
| 		cipher_TLS_DHE_DSS_WITH_DES_CBC_SHA, | ||||
| 		cipher_TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA, | ||||
| 		cipher_TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA, | ||||
| 		cipher_TLS_DHE_RSA_WITH_DES_CBC_SHA, | ||||
| 		cipher_TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA, | ||||
| 		cipher_TLS_DH_anon_EXPORT_WITH_RC4_40_MD5, | ||||
| 		cipher_TLS_DH_anon_WITH_RC4_128_MD5, | ||||
| 		cipher_TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA, | ||||
| 		cipher_TLS_DH_anon_WITH_DES_CBC_SHA, | ||||
| 		cipher_TLS_DH_anon_WITH_3DES_EDE_CBC_SHA, | ||||
| 		cipher_TLS_KRB5_WITH_DES_CBC_SHA, | ||||
| 		cipher_TLS_KRB5_WITH_3DES_EDE_CBC_SHA, | ||||
| 		cipher_TLS_KRB5_WITH_RC4_128_SHA, | ||||
| 		cipher_TLS_KRB5_WITH_IDEA_CBC_SHA, | ||||
| 		cipher_TLS_KRB5_WITH_DES_CBC_MD5, | ||||
| 		cipher_TLS_KRB5_WITH_3DES_EDE_CBC_MD5, | ||||
| 		cipher_TLS_KRB5_WITH_RC4_128_MD5, | ||||
| 		cipher_TLS_KRB5_WITH_IDEA_CBC_MD5, | ||||
| 		cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA, | ||||
| 		cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA, | ||||
| 		cipher_TLS_KRB5_EXPORT_WITH_RC4_40_SHA, | ||||
| 		cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5, | ||||
| 		cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5, | ||||
| 		cipher_TLS_KRB5_EXPORT_WITH_RC4_40_MD5, | ||||
| 		cipher_TLS_PSK_WITH_NULL_SHA, | ||||
| 		cipher_TLS_DHE_PSK_WITH_NULL_SHA, | ||||
| 		cipher_TLS_RSA_PSK_WITH_NULL_SHA, | ||||
| 		cipher_TLS_RSA_WITH_AES_128_CBC_SHA, | ||||
| 		cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA, | ||||
| 		cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA, | ||||
| 		cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA, | ||||
| 		cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA, | ||||
| 		cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA, | ||||
| 		cipher_TLS_RSA_WITH_AES_256_CBC_SHA, | ||||
| 		cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA, | ||||
| 		cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA, | ||||
| 		cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA, | ||||
| 		cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA, | ||||
| 		cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA, | ||||
| 		cipher_TLS_RSA_WITH_NULL_SHA256, | ||||
| 		cipher_TLS_RSA_WITH_AES_128_CBC_SHA256, | ||||
| 		cipher_TLS_RSA_WITH_AES_256_CBC_SHA256, | ||||
| 		cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA256, | ||||
| 		cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA256, | ||||
| 		cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA256, | ||||
| 		cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA, | ||||
| 		cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA, | ||||
| 		cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA, | ||||
| 		cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA, | ||||
| 		cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA, | ||||
| 		cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA, | ||||
| 		cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA256, | ||||
| 		cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA256, | ||||
| 		cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA256, | ||||
| 		cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA256, | ||||
| 		cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA256, | ||||
| 		cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA256, | ||||
| 		cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA256, | ||||
| 		cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA, | ||||
| 		cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA, | ||||
| 		cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA, | ||||
| 		cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA, | ||||
| 		cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA, | ||||
| 		cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA, | ||||
| 		cipher_TLS_PSK_WITH_RC4_128_SHA, | ||||
| 		cipher_TLS_PSK_WITH_3DES_EDE_CBC_SHA, | ||||
| 		cipher_TLS_PSK_WITH_AES_128_CBC_SHA, | ||||
| 		cipher_TLS_PSK_WITH_AES_256_CBC_SHA, | ||||
| 		cipher_TLS_DHE_PSK_WITH_RC4_128_SHA, | ||||
| 		cipher_TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA, | ||||
| 		cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA, | ||||
| 		cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA, | ||||
| 		cipher_TLS_RSA_PSK_WITH_RC4_128_SHA, | ||||
| 		cipher_TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA, | ||||
| 		cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA, | ||||
| 		cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA, | ||||
| 		cipher_TLS_RSA_WITH_SEED_CBC_SHA, | ||||
| 		cipher_TLS_DH_DSS_WITH_SEED_CBC_SHA, | ||||
| 		cipher_TLS_DH_RSA_WITH_SEED_CBC_SHA, | ||||
| 		cipher_TLS_DHE_DSS_WITH_SEED_CBC_SHA, | ||||
| 		cipher_TLS_DHE_RSA_WITH_SEED_CBC_SHA, | ||||
| 		cipher_TLS_DH_anon_WITH_SEED_CBC_SHA, | ||||
| 		cipher_TLS_RSA_WITH_AES_128_GCM_SHA256, | ||||
| 		cipher_TLS_RSA_WITH_AES_256_GCM_SHA384, | ||||
| 		cipher_TLS_DH_RSA_WITH_AES_128_GCM_SHA256, | ||||
| 		cipher_TLS_DH_RSA_WITH_AES_256_GCM_SHA384, | ||||
| 		cipher_TLS_DH_DSS_WITH_AES_128_GCM_SHA256, | ||||
| 		cipher_TLS_DH_DSS_WITH_AES_256_GCM_SHA384, | ||||
| 		cipher_TLS_DH_anon_WITH_AES_128_GCM_SHA256, | ||||
| 		cipher_TLS_DH_anon_WITH_AES_256_GCM_SHA384, | ||||
| 		cipher_TLS_PSK_WITH_AES_128_GCM_SHA256, | ||||
| 		cipher_TLS_PSK_WITH_AES_256_GCM_SHA384, | ||||
| 		cipher_TLS_RSA_PSK_WITH_AES_128_GCM_SHA256, | ||||
| 		cipher_TLS_RSA_PSK_WITH_AES_256_GCM_SHA384, | ||||
| 		cipher_TLS_PSK_WITH_AES_128_CBC_SHA256, | ||||
| 		cipher_TLS_PSK_WITH_AES_256_CBC_SHA384, | ||||
| 		cipher_TLS_PSK_WITH_NULL_SHA256, | ||||
| 		cipher_TLS_PSK_WITH_NULL_SHA384, | ||||
| 		cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA256, | ||||
| 		cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA384, | ||||
| 		cipher_TLS_DHE_PSK_WITH_NULL_SHA256, | ||||
| 		cipher_TLS_DHE_PSK_WITH_NULL_SHA384, | ||||
| 		cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA256, | ||||
| 		cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA384, | ||||
| 		cipher_TLS_RSA_PSK_WITH_NULL_SHA256, | ||||
| 		cipher_TLS_RSA_PSK_WITH_NULL_SHA384, | ||||
| 		cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256, | ||||
| 		cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256, | ||||
| 		cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256, | ||||
| 		cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256, | ||||
| 		cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256, | ||||
| 		cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256, | ||||
| 		cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256, | ||||
| 		cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256, | ||||
| 		cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256, | ||||
| 		cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256, | ||||
| 		cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256, | ||||
| 		cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256, | ||||
| 		cipher_TLS_EMPTY_RENEGOTIATION_INFO_SCSV, | ||||
| 		cipher_TLS_ECDH_ECDSA_WITH_NULL_SHA, | ||||
| 		cipher_TLS_ECDH_ECDSA_WITH_RC4_128_SHA, | ||||
| 		cipher_TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA, | ||||
| 		cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA, | ||||
| 		cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA, | ||||
| 		cipher_TLS_ECDHE_ECDSA_WITH_NULL_SHA, | ||||
| 		cipher_TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, | ||||
| 		cipher_TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA, | ||||
| 		cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, | ||||
| 		cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, | ||||
| 		cipher_TLS_ECDH_RSA_WITH_NULL_SHA, | ||||
| 		cipher_TLS_ECDH_RSA_WITH_RC4_128_SHA, | ||||
| 		cipher_TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA, | ||||
| 		cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA, | ||||
| 		cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA, | ||||
| 		cipher_TLS_ECDHE_RSA_WITH_NULL_SHA, | ||||
| 		cipher_TLS_ECDHE_RSA_WITH_RC4_128_SHA, | ||||
| 		cipher_TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, | ||||
| 		cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, | ||||
| 		cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, | ||||
| 		cipher_TLS_ECDH_anon_WITH_NULL_SHA, | ||||
| 		cipher_TLS_ECDH_anon_WITH_RC4_128_SHA, | ||||
| 		cipher_TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA, | ||||
| 		cipher_TLS_ECDH_anon_WITH_AES_128_CBC_SHA, | ||||
| 		cipher_TLS_ECDH_anon_WITH_AES_256_CBC_SHA, | ||||
| 		cipher_TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA, | ||||
| 		cipher_TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA, | ||||
| 		cipher_TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA, | ||||
| 		cipher_TLS_SRP_SHA_WITH_AES_128_CBC_SHA, | ||||
| 		cipher_TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA, | ||||
| 		cipher_TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA, | ||||
| 		cipher_TLS_SRP_SHA_WITH_AES_256_CBC_SHA, | ||||
| 		cipher_TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA, | ||||
| 		cipher_TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA, | ||||
| 		cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, | ||||
| 		cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384, | ||||
| 		cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256, | ||||
| 		cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384, | ||||
| 		cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, | ||||
| 		cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384, | ||||
| 		cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256, | ||||
| 		cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384, | ||||
| 		cipher_TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256, | ||||
| 		cipher_TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384, | ||||
| 		cipher_TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256, | ||||
| 		cipher_TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384, | ||||
| 		cipher_TLS_ECDHE_PSK_WITH_RC4_128_SHA, | ||||
| 		cipher_TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA, | ||||
| 		cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA, | ||||
| 		cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA, | ||||
| 		cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256, | ||||
| 		cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384, | ||||
| 		cipher_TLS_ECDHE_PSK_WITH_NULL_SHA, | ||||
| 		cipher_TLS_ECDHE_PSK_WITH_NULL_SHA256, | ||||
| 		cipher_TLS_ECDHE_PSK_WITH_NULL_SHA384, | ||||
| 		cipher_TLS_RSA_WITH_ARIA_128_CBC_SHA256, | ||||
| 		cipher_TLS_RSA_WITH_ARIA_256_CBC_SHA384, | ||||
| 		cipher_TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256, | ||||
| 		cipher_TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384, | ||||
| 		cipher_TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256, | ||||
| 		cipher_TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384, | ||||
| 		cipher_TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256, | ||||
| 		cipher_TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384, | ||||
| 		cipher_TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256, | ||||
| 		cipher_TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384, | ||||
| 		cipher_TLS_DH_anon_WITH_ARIA_128_CBC_SHA256, | ||||
| 		cipher_TLS_DH_anon_WITH_ARIA_256_CBC_SHA384, | ||||
| 		cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256, | ||||
| 		cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384, | ||||
| 		cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256, | ||||
| 		cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384, | ||||
| 		cipher_TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256, | ||||
| 		cipher_TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384, | ||||
| 		cipher_TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256, | ||||
| 		cipher_TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384, | ||||
| 		cipher_TLS_RSA_WITH_ARIA_128_GCM_SHA256, | ||||
| 		cipher_TLS_RSA_WITH_ARIA_256_GCM_SHA384, | ||||
| 		cipher_TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256, | ||||
| 		cipher_TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384, | ||||
| 		cipher_TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256, | ||||
| 		cipher_TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384, | ||||
| 		cipher_TLS_DH_anon_WITH_ARIA_128_GCM_SHA256, | ||||
| 		cipher_TLS_DH_anon_WITH_ARIA_256_GCM_SHA384, | ||||
| 		cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256, | ||||
| 		cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384, | ||||
| 		cipher_TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256, | ||||
| 		cipher_TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384, | ||||
| 		cipher_TLS_PSK_WITH_ARIA_128_CBC_SHA256, | ||||
| 		cipher_TLS_PSK_WITH_ARIA_256_CBC_SHA384, | ||||
| 		cipher_TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256, | ||||
| 		cipher_TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384, | ||||
| 		cipher_TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256, | ||||
| 		cipher_TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384, | ||||
| 		cipher_TLS_PSK_WITH_ARIA_128_GCM_SHA256, | ||||
| 		cipher_TLS_PSK_WITH_ARIA_256_GCM_SHA384, | ||||
| 		cipher_TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256, | ||||
| 		cipher_TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384, | ||||
| 		cipher_TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256, | ||||
| 		cipher_TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384, | ||||
| 		cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256, | ||||
| 		cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384, | ||||
| 		cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256, | ||||
| 		cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384, | ||||
| 		cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256, | ||||
| 		cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384, | ||||
| 		cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256, | ||||
| 		cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384, | ||||
| 		cipher_TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256, | ||||
| 		cipher_TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384, | ||||
| 		cipher_TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256, | ||||
| 		cipher_TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384, | ||||
| 		cipher_TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256, | ||||
| 		cipher_TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384, | ||||
| 		cipher_TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256, | ||||
| 		cipher_TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384, | ||||
| 		cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256, | ||||
| 		cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384, | ||||
| 		cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256, | ||||
| 		cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384, | ||||
| 		cipher_TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256, | ||||
| 		cipher_TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384, | ||||
| 		cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256, | ||||
| 		cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384, | ||||
| 		cipher_TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256, | ||||
| 		cipher_TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384, | ||||
| 		cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256, | ||||
| 		cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384, | ||||
| 		cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256, | ||||
| 		cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384, | ||||
| 		cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256, | ||||
| 		cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384, | ||||
| 		cipher_TLS_RSA_WITH_AES_128_CCM, | ||||
| 		cipher_TLS_RSA_WITH_AES_256_CCM, | ||||
| 		cipher_TLS_RSA_WITH_AES_128_CCM_8, | ||||
| 		cipher_TLS_RSA_WITH_AES_256_CCM_8, | ||||
| 		cipher_TLS_PSK_WITH_AES_128_CCM, | ||||
| 		cipher_TLS_PSK_WITH_AES_256_CCM, | ||||
| 		cipher_TLS_PSK_WITH_AES_128_CCM_8, | ||||
| 		cipher_TLS_PSK_WITH_AES_256_CCM_8: | ||||
| 		return true | ||||
| 	default: | ||||
| 		return false | ||||
| 	} | ||||
| } | ||||
							
								
								
									
										2
									
								
								vendor/golang.org/x/net/http2/client_conn_pool.go
									
									
									
										generated
									
									
										vendored
									
									
								
							
							
						
						
									
										2
									
								
								vendor/golang.org/x/net/http2/client_conn_pool.go
									
									
									
										generated
									
									
										vendored
									
									
								
							| @@ -247,7 +247,7 @@ func filterOutClientConn(in []*ClientConn, exclude *ClientConn) []*ClientConn { | ||||
| } | ||||
|  | ||||
| // noDialClientConnPool is an implementation of http2.ClientConnPool | ||||
| // which never dials.  We let the HTTP/1.1 client dial and use its TLS | ||||
| // which never dials. We let the HTTP/1.1 client dial and use its TLS | ||||
| // connection instead. | ||||
| type noDialClientConnPool struct{ *clientConnPool } | ||||
|  | ||||
|   | ||||
							
								
								
									
										146
									
								
								vendor/golang.org/x/net/http2/databuffer.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										146
									
								
								vendor/golang.org/x/net/http2/databuffer.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,146 @@ | ||||
| // Copyright 2014 The Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| package http2 | ||||
|  | ||||
| import ( | ||||
| 	"errors" | ||||
| 	"fmt" | ||||
| 	"sync" | ||||
| ) | ||||
|  | ||||
| // Buffer chunks are allocated from a pool to reduce pressure on GC. | ||||
| // The maximum wasted space per dataBuffer is 2x the largest size class, | ||||
| // which happens when the dataBuffer has multiple chunks and there is | ||||
| // one unread byte in both the first and last chunks. We use a few size | ||||
| // classes to minimize overheads for servers that typically receive very | ||||
| // small request bodies. | ||||
| // | ||||
| // TODO: Benchmark to determine if the pools are necessary. The GC may have | ||||
| // improved enough that we can instead allocate chunks like this: | ||||
| // make([]byte, max(16<<10, expectedBytesRemaining)) | ||||
| var ( | ||||
| 	dataChunkSizeClasses = []int{ | ||||
| 		1 << 10, | ||||
| 		2 << 10, | ||||
| 		4 << 10, | ||||
| 		8 << 10, | ||||
| 		16 << 10, | ||||
| 	} | ||||
| 	dataChunkPools = [...]sync.Pool{ | ||||
| 		{New: func() interface{} { return make([]byte, 1<<10) }}, | ||||
| 		{New: func() interface{} { return make([]byte, 2<<10) }}, | ||||
| 		{New: func() interface{} { return make([]byte, 4<<10) }}, | ||||
| 		{New: func() interface{} { return make([]byte, 8<<10) }}, | ||||
| 		{New: func() interface{} { return make([]byte, 16<<10) }}, | ||||
| 	} | ||||
| ) | ||||
|  | ||||
| func getDataBufferChunk(size int64) []byte { | ||||
| 	i := 0 | ||||
| 	for ; i < len(dataChunkSizeClasses)-1; i++ { | ||||
| 		if size <= int64(dataChunkSizeClasses[i]) { | ||||
| 			break | ||||
| 		} | ||||
| 	} | ||||
| 	return dataChunkPools[i].Get().([]byte) | ||||
| } | ||||
|  | ||||
| func putDataBufferChunk(p []byte) { | ||||
| 	for i, n := range dataChunkSizeClasses { | ||||
| 		if len(p) == n { | ||||
| 			dataChunkPools[i].Put(p) | ||||
| 			return | ||||
| 		} | ||||
| 	} | ||||
| 	panic(fmt.Sprintf("unexpected buffer len=%v", len(p))) | ||||
| } | ||||
|  | ||||
| // dataBuffer is an io.ReadWriter backed by a list of data chunks. | ||||
| // Each dataBuffer is used to read DATA frames on a single stream. | ||||
| // The buffer is divided into chunks so the server can limit the | ||||
| // total memory used by a single connection without limiting the | ||||
| // request body size on any single stream. | ||||
| type dataBuffer struct { | ||||
| 	chunks   [][]byte | ||||
| 	r        int   // next byte to read is chunks[0][r] | ||||
| 	w        int   // next byte to write is chunks[len(chunks)-1][w] | ||||
| 	size     int   // total buffered bytes | ||||
| 	expected int64 // we expect at least this many bytes in future Write calls (ignored if <= 0) | ||||
| } | ||||
|  | ||||
| var errReadEmpty = errors.New("read from empty dataBuffer") | ||||
|  | ||||
| // Read copies bytes from the buffer into p. | ||||
| // It is an error to read when no data is available. | ||||
| func (b *dataBuffer) Read(p []byte) (int, error) { | ||||
| 	if b.size == 0 { | ||||
| 		return 0, errReadEmpty | ||||
| 	} | ||||
| 	var ntotal int | ||||
| 	for len(p) > 0 && b.size > 0 { | ||||
| 		readFrom := b.bytesFromFirstChunk() | ||||
| 		n := copy(p, readFrom) | ||||
| 		p = p[n:] | ||||
| 		ntotal += n | ||||
| 		b.r += n | ||||
| 		b.size -= n | ||||
| 		// If the first chunk has been consumed, advance to the next chunk. | ||||
| 		if b.r == len(b.chunks[0]) { | ||||
| 			putDataBufferChunk(b.chunks[0]) | ||||
| 			end := len(b.chunks) - 1 | ||||
| 			copy(b.chunks[:end], b.chunks[1:]) | ||||
| 			b.chunks[end] = nil | ||||
| 			b.chunks = b.chunks[:end] | ||||
| 			b.r = 0 | ||||
| 		} | ||||
| 	} | ||||
| 	return ntotal, nil | ||||
| } | ||||
|  | ||||
| func (b *dataBuffer) bytesFromFirstChunk() []byte { | ||||
| 	if len(b.chunks) == 1 { | ||||
| 		return b.chunks[0][b.r:b.w] | ||||
| 	} | ||||
| 	return b.chunks[0][b.r:] | ||||
| } | ||||
|  | ||||
| // Len returns the number of bytes of the unread portion of the buffer. | ||||
| func (b *dataBuffer) Len() int { | ||||
| 	return b.size | ||||
| } | ||||
|  | ||||
| // Write appends p to the buffer. | ||||
| func (b *dataBuffer) Write(p []byte) (int, error) { | ||||
| 	ntotal := len(p) | ||||
| 	for len(p) > 0 { | ||||
| 		// If the last chunk is empty, allocate a new chunk. Try to allocate | ||||
| 		// enough to fully copy p plus any additional bytes we expect to | ||||
| 		// receive. However, this may allocate less than len(p). | ||||
| 		want := int64(len(p)) | ||||
| 		if b.expected > want { | ||||
| 			want = b.expected | ||||
| 		} | ||||
| 		chunk := b.lastChunkOrAlloc(want) | ||||
| 		n := copy(chunk[b.w:], p) | ||||
| 		p = p[n:] | ||||
| 		b.w += n | ||||
| 		b.size += n | ||||
| 		b.expected -= int64(n) | ||||
| 	} | ||||
| 	return ntotal, nil | ||||
| } | ||||
|  | ||||
| func (b *dataBuffer) lastChunkOrAlloc(want int64) []byte { | ||||
| 	if len(b.chunks) != 0 { | ||||
| 		last := b.chunks[len(b.chunks)-1] | ||||
| 		if b.w < len(last) { | ||||
| 			return last | ||||
| 		} | ||||
| 	} | ||||
| 	chunk := getDataBufferChunk(want) | ||||
| 	b.chunks = append(b.chunks, chunk) | ||||
| 	b.w = 0 | ||||
| 	return chunk | ||||
| } | ||||
							
								
								
									
										60
									
								
								vendor/golang.org/x/net/http2/fixed_buffer.go
									
									
									
										generated
									
									
										vendored
									
									
								
							
							
						
						
									
										60
									
								
								vendor/golang.org/x/net/http2/fixed_buffer.go
									
									
									
										generated
									
									
										vendored
									
									
								
							| @@ -1,60 +0,0 @@ | ||||
| // Copyright 2014 The Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| package http2 | ||||
|  | ||||
| import ( | ||||
| 	"errors" | ||||
| ) | ||||
|  | ||||
| // fixedBuffer is an io.ReadWriter backed by a fixed size buffer. | ||||
| // It never allocates, but moves old data as new data is written. | ||||
| type fixedBuffer struct { | ||||
| 	buf  []byte | ||||
| 	r, w int | ||||
| } | ||||
|  | ||||
| var ( | ||||
| 	errReadEmpty = errors.New("read from empty fixedBuffer") | ||||
| 	errWriteFull = errors.New("write on full fixedBuffer") | ||||
| ) | ||||
|  | ||||
| // Read copies bytes from the buffer into p. | ||||
| // It is an error to read when no data is available. | ||||
| func (b *fixedBuffer) Read(p []byte) (n int, err error) { | ||||
| 	if b.r == b.w { | ||||
| 		return 0, errReadEmpty | ||||
| 	} | ||||
| 	n = copy(p, b.buf[b.r:b.w]) | ||||
| 	b.r += n | ||||
| 	if b.r == b.w { | ||||
| 		b.r = 0 | ||||
| 		b.w = 0 | ||||
| 	} | ||||
| 	return n, nil | ||||
| } | ||||
|  | ||||
| // Len returns the number of bytes of the unread portion of the buffer. | ||||
| func (b *fixedBuffer) Len() int { | ||||
| 	return b.w - b.r | ||||
| } | ||||
|  | ||||
| // Write copies bytes from p into the buffer. | ||||
| // It is an error to write more data than the buffer can hold. | ||||
| func (b *fixedBuffer) Write(p []byte) (n int, err error) { | ||||
| 	// Slide existing data to beginning. | ||||
| 	if b.r > 0 && len(p) > len(b.buf)-b.w { | ||||
| 		copy(b.buf, b.buf[b.r:b.w]) | ||||
| 		b.w -= b.r | ||||
| 		b.r = 0 | ||||
| 	} | ||||
|  | ||||
| 	// Write new data. | ||||
| 	n = copy(b.buf[b.w:], p) | ||||
| 	b.w += n | ||||
| 	if n < len(p) { | ||||
| 		err = errWriteFull | ||||
| 	} | ||||
| 	return n, err | ||||
| } | ||||
							
								
								
									
										110
									
								
								vendor/golang.org/x/net/http2/frame.go
									
									
									
										generated
									
									
										vendored
									
									
								
							
							
						
						
									
										110
									
								
								vendor/golang.org/x/net/http2/frame.go
									
									
									
										generated
									
									
										vendored
									
									
								
							| @@ -122,7 +122,7 @@ var flagName = map[FrameType]map[Flags]string{ | ||||
| // a frameParser parses a frame given its FrameHeader and payload | ||||
| // bytes. The length of payload will always equal fh.Length (which | ||||
| // might be 0). | ||||
| type frameParser func(fh FrameHeader, payload []byte) (Frame, error) | ||||
| type frameParser func(fc *frameCache, fh FrameHeader, payload []byte) (Frame, error) | ||||
|  | ||||
| var frameParsers = map[FrameType]frameParser{ | ||||
| 	FrameData:         parseDataFrame, | ||||
| @@ -312,15 +312,19 @@ type Framer struct { | ||||
| 	MaxHeaderListSize uint32 | ||||
|  | ||||
| 	// TODO: track which type of frame & with which flags was sent | ||||
| 	// last.  Then return an error (unless AllowIllegalWrites) if | ||||
| 	// last. Then return an error (unless AllowIllegalWrites) if | ||||
| 	// we're in the middle of a header block and a | ||||
| 	// non-Continuation or Continuation on a different stream is | ||||
| 	// attempted to be written. | ||||
|  | ||||
| 	logReads bool | ||||
| 	logReads, logWrites bool | ||||
|  | ||||
| 	debugFramer    *Framer // only use for logging written writes | ||||
| 	debugFramerBuf *bytes.Buffer | ||||
| 	debugFramer       *Framer // only use for logging written writes | ||||
| 	debugFramerBuf    *bytes.Buffer | ||||
| 	debugReadLoggerf  func(string, ...interface{}) | ||||
| 	debugWriteLoggerf func(string, ...interface{}) | ||||
|  | ||||
| 	frameCache *frameCache // nil if frames aren't reused (default) | ||||
| } | ||||
|  | ||||
| func (fr *Framer) maxHeaderListSize() uint32 { | ||||
| @@ -355,7 +359,7 @@ func (f *Framer) endWrite() error { | ||||
| 		byte(length>>16), | ||||
| 		byte(length>>8), | ||||
| 		byte(length)) | ||||
| 	if logFrameWrites { | ||||
| 	if f.logWrites { | ||||
| 		f.logWrite() | ||||
| 	} | ||||
|  | ||||
| @@ -378,10 +382,10 @@ func (f *Framer) logWrite() { | ||||
| 	f.debugFramerBuf.Write(f.wbuf) | ||||
| 	fr, err := f.debugFramer.ReadFrame() | ||||
| 	if err != nil { | ||||
| 		log.Printf("http2: Framer %p: failed to decode just-written frame", f) | ||||
| 		f.debugWriteLoggerf("http2: Framer %p: failed to decode just-written frame", f) | ||||
| 		return | ||||
| 	} | ||||
| 	log.Printf("http2: Framer %p: wrote %v", f, summarizeFrame(fr)) | ||||
| 	f.debugWriteLoggerf("http2: Framer %p: wrote %v", f, summarizeFrame(fr)) | ||||
| } | ||||
|  | ||||
| func (f *Framer) writeByte(v byte)     { f.wbuf = append(f.wbuf, v) } | ||||
| @@ -396,12 +400,36 @@ const ( | ||||
| 	maxFrameSize    = 1<<24 - 1 | ||||
| ) | ||||
|  | ||||
| // SetReuseFrames allows the Framer to reuse Frames. | ||||
| // If called on a Framer, Frames returned by calls to ReadFrame are only | ||||
| // valid until the next call to ReadFrame. | ||||
| func (fr *Framer) SetReuseFrames() { | ||||
| 	if fr.frameCache != nil { | ||||
| 		return | ||||
| 	} | ||||
| 	fr.frameCache = &frameCache{} | ||||
| } | ||||
|  | ||||
| type frameCache struct { | ||||
| 	dataFrame DataFrame | ||||
| } | ||||
|  | ||||
| func (fc *frameCache) getDataFrame() *DataFrame { | ||||
| 	if fc == nil { | ||||
| 		return &DataFrame{} | ||||
| 	} | ||||
| 	return &fc.dataFrame | ||||
| } | ||||
|  | ||||
| // NewFramer returns a Framer that writes frames to w and reads them from r. | ||||
| func NewFramer(w io.Writer, r io.Reader) *Framer { | ||||
| 	fr := &Framer{ | ||||
| 		w:        w, | ||||
| 		r:        r, | ||||
| 		logReads: logFrameReads, | ||||
| 		w:                 w, | ||||
| 		r:                 r, | ||||
| 		logReads:          logFrameReads, | ||||
| 		logWrites:         logFrameWrites, | ||||
| 		debugReadLoggerf:  log.Printf, | ||||
| 		debugWriteLoggerf: log.Printf, | ||||
| 	} | ||||
| 	fr.getReadBuf = func(size uint32) []byte { | ||||
| 		if cap(fr.readBuf) >= int(size) { | ||||
| @@ -472,7 +500,7 @@ func (fr *Framer) ReadFrame() (Frame, error) { | ||||
| 	if _, err := io.ReadFull(fr.r, payload); err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	f, err := typeFrameParser(fh.Type)(fh, payload) | ||||
| 	f, err := typeFrameParser(fh.Type)(fr.frameCache, fh, payload) | ||||
| 	if err != nil { | ||||
| 		if ce, ok := err.(connError); ok { | ||||
| 			return nil, fr.connError(ce.Code, ce.Reason) | ||||
| @@ -483,7 +511,7 @@ func (fr *Framer) ReadFrame() (Frame, error) { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	if fr.logReads { | ||||
| 		log.Printf("http2: Framer %p: read %v", fr, summarizeFrame(f)) | ||||
| 		fr.debugReadLoggerf("http2: Framer %p: read %v", fr, summarizeFrame(f)) | ||||
| 	} | ||||
| 	if fh.Type == FrameHeaders && fr.ReadMetaHeaders != nil { | ||||
| 		return fr.readMetaFrame(f.(*HeadersFrame)) | ||||
| @@ -560,7 +588,7 @@ func (f *DataFrame) Data() []byte { | ||||
| 	return f.data | ||||
| } | ||||
|  | ||||
| func parseDataFrame(fh FrameHeader, payload []byte) (Frame, error) { | ||||
| func parseDataFrame(fc *frameCache, fh FrameHeader, payload []byte) (Frame, error) { | ||||
| 	if fh.StreamID == 0 { | ||||
| 		// DATA frames MUST be associated with a stream. If a | ||||
| 		// DATA frame is received whose stream identifier | ||||
| @@ -569,9 +597,9 @@ func parseDataFrame(fh FrameHeader, payload []byte) (Frame, error) { | ||||
| 		// PROTOCOL_ERROR. | ||||
| 		return nil, connError{ErrCodeProtocol, "DATA frame with stream ID 0"} | ||||
| 	} | ||||
| 	f := &DataFrame{ | ||||
| 		FrameHeader: fh, | ||||
| 	} | ||||
| 	f := fc.getDataFrame() | ||||
| 	f.FrameHeader = fh | ||||
|  | ||||
| 	var padSize byte | ||||
| 	if fh.Flags.Has(FlagDataPadded) { | ||||
| 		var err error | ||||
| @@ -595,6 +623,7 @@ var ( | ||||
| 	errStreamID    = errors.New("invalid stream ID") | ||||
| 	errDepStreamID = errors.New("invalid dependent stream ID") | ||||
| 	errPadLength   = errors.New("pad length too large") | ||||
| 	errPadBytes    = errors.New("padding bytes must all be zeros unless AllowIllegalWrites is enabled") | ||||
| ) | ||||
|  | ||||
| func validStreamIDOrZero(streamID uint32) bool { | ||||
| @@ -618,6 +647,7 @@ func (f *Framer) WriteData(streamID uint32, endStream bool, data []byte) error { | ||||
| // | ||||
| // If pad is nil, the padding bit is not sent. | ||||
| // The length of pad must not exceed 255 bytes. | ||||
| // The bytes of pad must all be zero, unless f.AllowIllegalWrites is set. | ||||
| // | ||||
| // It will perform exactly one Write to the underlying Writer. | ||||
| // It is the caller's responsibility not to violate the maximum frame size | ||||
| @@ -626,8 +656,18 @@ func (f *Framer) WriteDataPadded(streamID uint32, endStream bool, data, pad []by | ||||
| 	if !validStreamID(streamID) && !f.AllowIllegalWrites { | ||||
| 		return errStreamID | ||||
| 	} | ||||
| 	if len(pad) > 255 { | ||||
| 		return errPadLength | ||||
| 	if len(pad) > 0 { | ||||
| 		if len(pad) > 255 { | ||||
| 			return errPadLength | ||||
| 		} | ||||
| 		if !f.AllowIllegalWrites { | ||||
| 			for _, b := range pad { | ||||
| 				if b != 0 { | ||||
| 					// "Padding octets MUST be set to zero when sending." | ||||
| 					return errPadBytes | ||||
| 				} | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 	var flags Flags | ||||
| 	if endStream { | ||||
| @@ -655,10 +695,10 @@ type SettingsFrame struct { | ||||
| 	p []byte | ||||
| } | ||||
|  | ||||
| func parseSettingsFrame(fh FrameHeader, p []byte) (Frame, error) { | ||||
| func parseSettingsFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { | ||||
| 	if fh.Flags.Has(FlagSettingsAck) && fh.Length > 0 { | ||||
| 		// When this (ACK 0x1) bit is set, the payload of the | ||||
| 		// SETTINGS frame MUST be empty.  Receipt of a | ||||
| 		// SETTINGS frame MUST be empty. Receipt of a | ||||
| 		// SETTINGS frame with the ACK flag set and a length | ||||
| 		// field value other than 0 MUST be treated as a | ||||
| 		// connection error (Section 5.4.1) of type | ||||
| @@ -667,7 +707,7 @@ func parseSettingsFrame(fh FrameHeader, p []byte) (Frame, error) { | ||||
| 	} | ||||
| 	if fh.StreamID != 0 { | ||||
| 		// SETTINGS frames always apply to a connection, | ||||
| 		// never a single stream.  The stream identifier for a | ||||
| 		// never a single stream. The stream identifier for a | ||||
| 		// SETTINGS frame MUST be zero (0x0).  If an endpoint | ||||
| 		// receives a SETTINGS frame whose stream identifier | ||||
| 		// field is anything other than 0x0, the endpoint MUST | ||||
| @@ -757,7 +797,7 @@ type PingFrame struct { | ||||
|  | ||||
| func (f *PingFrame) IsAck() bool { return f.Flags.Has(FlagPingAck) } | ||||
|  | ||||
| func parsePingFrame(fh FrameHeader, payload []byte) (Frame, error) { | ||||
| func parsePingFrame(_ *frameCache, fh FrameHeader, payload []byte) (Frame, error) { | ||||
| 	if len(payload) != 8 { | ||||
| 		return nil, ConnectionError(ErrCodeFrameSize) | ||||
| 	} | ||||
| @@ -797,7 +837,7 @@ func (f *GoAwayFrame) DebugData() []byte { | ||||
| 	return f.debugData | ||||
| } | ||||
|  | ||||
| func parseGoAwayFrame(fh FrameHeader, p []byte) (Frame, error) { | ||||
| func parseGoAwayFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { | ||||
| 	if fh.StreamID != 0 { | ||||
| 		return nil, ConnectionError(ErrCodeProtocol) | ||||
| 	} | ||||
| @@ -837,7 +877,7 @@ func (f *UnknownFrame) Payload() []byte { | ||||
| 	return f.p | ||||
| } | ||||
|  | ||||
| func parseUnknownFrame(fh FrameHeader, p []byte) (Frame, error) { | ||||
| func parseUnknownFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { | ||||
| 	return &UnknownFrame{fh, p}, nil | ||||
| } | ||||
|  | ||||
| @@ -848,7 +888,7 @@ type WindowUpdateFrame struct { | ||||
| 	Increment uint32 // never read with high bit set | ||||
| } | ||||
|  | ||||
| func parseWindowUpdateFrame(fh FrameHeader, p []byte) (Frame, error) { | ||||
| func parseWindowUpdateFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { | ||||
| 	if len(p) != 4 { | ||||
| 		return nil, ConnectionError(ErrCodeFrameSize) | ||||
| 	} | ||||
| @@ -913,12 +953,12 @@ func (f *HeadersFrame) HasPriority() bool { | ||||
| 	return f.FrameHeader.Flags.Has(FlagHeadersPriority) | ||||
| } | ||||
|  | ||||
| func parseHeadersFrame(fh FrameHeader, p []byte) (_ Frame, err error) { | ||||
| func parseHeadersFrame(_ *frameCache, fh FrameHeader, p []byte) (_ Frame, err error) { | ||||
| 	hf := &HeadersFrame{ | ||||
| 		FrameHeader: fh, | ||||
| 	} | ||||
| 	if fh.StreamID == 0 { | ||||
| 		// HEADERS frames MUST be associated with a stream.  If a HEADERS frame | ||||
| 		// HEADERS frames MUST be associated with a stream. If a HEADERS frame | ||||
| 		// is received whose stream identifier field is 0x0, the recipient MUST | ||||
| 		// respond with a connection error (Section 5.4.1) of type | ||||
| 		// PROTOCOL_ERROR. | ||||
| @@ -1040,7 +1080,7 @@ type PriorityParam struct { | ||||
| 	Exclusive bool | ||||
|  | ||||
| 	// Weight is the stream's zero-indexed weight. It should be | ||||
| 	// set together with StreamDep, or neither should be set.  Per | ||||
| 	// set together with StreamDep, or neither should be set. Per | ||||
| 	// the spec, "Add one to the value to obtain a weight between | ||||
| 	// 1 and 256." | ||||
| 	Weight uint8 | ||||
| @@ -1050,7 +1090,7 @@ func (p PriorityParam) IsZero() bool { | ||||
| 	return p == PriorityParam{} | ||||
| } | ||||
|  | ||||
| func parsePriorityFrame(fh FrameHeader, payload []byte) (Frame, error) { | ||||
| func parsePriorityFrame(_ *frameCache, fh FrameHeader, payload []byte) (Frame, error) { | ||||
| 	if fh.StreamID == 0 { | ||||
| 		return nil, connError{ErrCodeProtocol, "PRIORITY frame with stream ID 0"} | ||||
| 	} | ||||
| @@ -1097,7 +1137,7 @@ type RSTStreamFrame struct { | ||||
| 	ErrCode ErrCode | ||||
| } | ||||
|  | ||||
| func parseRSTStreamFrame(fh FrameHeader, p []byte) (Frame, error) { | ||||
| func parseRSTStreamFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { | ||||
| 	if len(p) != 4 { | ||||
| 		return nil, ConnectionError(ErrCodeFrameSize) | ||||
| 	} | ||||
| @@ -1127,7 +1167,7 @@ type ContinuationFrame struct { | ||||
| 	headerFragBuf []byte | ||||
| } | ||||
|  | ||||
| func parseContinuationFrame(fh FrameHeader, p []byte) (Frame, error) { | ||||
| func parseContinuationFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { | ||||
| 	if fh.StreamID == 0 { | ||||
| 		return nil, connError{ErrCodeProtocol, "CONTINUATION frame with stream ID 0"} | ||||
| 	} | ||||
| @@ -1177,7 +1217,7 @@ func (f *PushPromiseFrame) HeadersEnded() bool { | ||||
| 	return f.FrameHeader.Flags.Has(FlagPushPromiseEndHeaders) | ||||
| } | ||||
|  | ||||
| func parsePushPromise(fh FrameHeader, p []byte) (_ Frame, err error) { | ||||
| func parsePushPromise(_ *frameCache, fh FrameHeader, p []byte) (_ Frame, err error) { | ||||
| 	pp := &PushPromiseFrame{ | ||||
| 		FrameHeader: fh, | ||||
| 	} | ||||
| @@ -1419,8 +1459,8 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) { | ||||
| 	hdec.SetEmitEnabled(true) | ||||
| 	hdec.SetMaxStringLength(fr.maxHeaderStringLen()) | ||||
| 	hdec.SetEmitFunc(func(hf hpack.HeaderField) { | ||||
| 		if VerboseLogs && logFrameReads { | ||||
| 			log.Printf("http2: decoded hpack field %+v", hf) | ||||
| 		if VerboseLogs && fr.logReads { | ||||
| 			fr.debugReadLoggerf("http2: decoded hpack field %+v", hf) | ||||
| 		} | ||||
| 		if !httplex.ValidHeaderFieldValue(hf.Value) { | ||||
| 			invalid = headerFieldValueError(hf.Value) | ||||
|   | ||||
							
								
								
									
										27
									
								
								vendor/golang.org/x/net/http2/go16.go
									
									
									
										generated
									
									
										vendored
									
									
								
							
							
						
						
									
										27
									
								
								vendor/golang.org/x/net/http2/go16.go
									
									
									
										generated
									
									
										vendored
									
									
								
							| @@ -7,7 +7,6 @@ | ||||
| package http2 | ||||
|  | ||||
| import ( | ||||
| 	"crypto/tls" | ||||
| 	"net/http" | ||||
| 	"time" | ||||
| ) | ||||
| @@ -15,29 +14,3 @@ import ( | ||||
| func transportExpectContinueTimeout(t1 *http.Transport) time.Duration { | ||||
| 	return t1.ExpectContinueTimeout | ||||
| } | ||||
|  | ||||
| // isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec. | ||||
| func isBadCipher(cipher uint16) bool { | ||||
| 	switch cipher { | ||||
| 	case tls.TLS_RSA_WITH_RC4_128_SHA, | ||||
| 		tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA, | ||||
| 		tls.TLS_RSA_WITH_AES_128_CBC_SHA, | ||||
| 		tls.TLS_RSA_WITH_AES_256_CBC_SHA, | ||||
| 		tls.TLS_RSA_WITH_AES_128_GCM_SHA256, | ||||
| 		tls.TLS_RSA_WITH_AES_256_GCM_SHA384, | ||||
| 		tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, | ||||
| 		tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, | ||||
| 		tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, | ||||
| 		tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA, | ||||
| 		tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, | ||||
| 		tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, | ||||
| 		tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: | ||||
| 		// Reject cipher suites from Appendix A. | ||||
| 		// "This list includes those cipher suites that do not | ||||
| 		// offer an ephemeral key exchange and those that are | ||||
| 		// based on the TLS null, stream or block cipher type" | ||||
| 		return true | ||||
| 	default: | ||||
| 		return false | ||||
| 	} | ||||
| } | ||||
|   | ||||
							
								
								
									
										47
									
								
								vendor/golang.org/x/net/http2/go18.go
									
									
									
										generated
									
									
										vendored
									
									
								
							
							
						
						
									
										47
									
								
								vendor/golang.org/x/net/http2/go18.go
									
									
									
										generated
									
									
										vendored
									
									
								
							| @@ -6,6 +6,49 @@ | ||||
|  | ||||
| package http2 | ||||
|  | ||||
| import "crypto/tls" | ||||
| import ( | ||||
| 	"crypto/tls" | ||||
| 	"io" | ||||
| 	"net/http" | ||||
| ) | ||||
|  | ||||
| func cloneTLSConfig(c *tls.Config) *tls.Config { return c.Clone() } | ||||
| func cloneTLSConfig(c *tls.Config) *tls.Config { | ||||
| 	c2 := c.Clone() | ||||
| 	c2.GetClientCertificate = c.GetClientCertificate // golang.org/issue/19264 | ||||
| 	return c2 | ||||
| } | ||||
|  | ||||
| var _ http.Pusher = (*responseWriter)(nil) | ||||
|  | ||||
| // Push implements http.Pusher. | ||||
| func (w *responseWriter) Push(target string, opts *http.PushOptions) error { | ||||
| 	internalOpts := pushOptions{} | ||||
| 	if opts != nil { | ||||
| 		internalOpts.Method = opts.Method | ||||
| 		internalOpts.Header = opts.Header | ||||
| 	} | ||||
| 	return w.push(target, internalOpts) | ||||
| } | ||||
|  | ||||
| func configureServer18(h1 *http.Server, h2 *Server) error { | ||||
| 	if h2.IdleTimeout == 0 { | ||||
| 		if h1.IdleTimeout != 0 { | ||||
| 			h2.IdleTimeout = h1.IdleTimeout | ||||
| 		} else { | ||||
| 			h2.IdleTimeout = h1.ReadTimeout | ||||
| 		} | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func shouldLogPanic(panicValue interface{}) bool { | ||||
| 	return panicValue != nil && panicValue != http.ErrAbortHandler | ||||
| } | ||||
|  | ||||
| func reqGetBody(req *http.Request) func() (io.ReadCloser, error) { | ||||
| 	return req.GetBody | ||||
| } | ||||
|  | ||||
| func reqBodyIsNoBody(body io.ReadCloser) bool { | ||||
| 	return body == http.NoBody | ||||
| } | ||||
|   | ||||
							
								
								
									
										16
									
								
								vendor/golang.org/x/net/http2/go19.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										16
									
								
								vendor/golang.org/x/net/http2/go19.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,16 @@ | ||||
| // Copyright 2015 The Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| // +build go1.9 | ||||
|  | ||||
| package http2 | ||||
|  | ||||
| import ( | ||||
| 	"net/http" | ||||
| ) | ||||
|  | ||||
| func configureServer19(s *http.Server, conf *Server) error { | ||||
| 	s.RegisterOnShutdown(conf.state.startGracefulShutdown) | ||||
| 	return nil | ||||
| } | ||||
							
								
								
									
										29
									
								
								vendor/golang.org/x/net/http2/hpack/encode.go
									
									
									
										generated
									
									
										vendored
									
									
								
							
							
						
						
									
										29
									
								
								vendor/golang.org/x/net/http2/hpack/encode.go
									
									
									
										generated
									
									
										vendored
									
									
								
							| @@ -39,13 +39,14 @@ func NewEncoder(w io.Writer) *Encoder { | ||||
| 		tableSizeUpdate: false, | ||||
| 		w:               w, | ||||
| 	} | ||||
| 	e.dynTab.table.init() | ||||
| 	e.dynTab.setMaxSize(initialHeaderTableSize) | ||||
| 	return e | ||||
| } | ||||
|  | ||||
| // WriteField encodes f into a single Write to e's underlying Writer. | ||||
| // This function may also produce bytes for "Header Table Size Update" | ||||
| // if necessary.  If produced, it is done before encoding f. | ||||
| // if necessary. If produced, it is done before encoding f. | ||||
| func (e *Encoder) WriteField(f HeaderField) error { | ||||
| 	e.buf = e.buf[:0] | ||||
|  | ||||
| @@ -88,29 +89,17 @@ func (e *Encoder) WriteField(f HeaderField) error { | ||||
| // only name matches, i points to that index and nameValueMatch | ||||
| // becomes false. | ||||
| func (e *Encoder) searchTable(f HeaderField) (i uint64, nameValueMatch bool) { | ||||
| 	for idx, hf := range staticTable { | ||||
| 		if !constantTimeStringCompare(hf.Name, f.Name) { | ||||
| 			continue | ||||
| 		} | ||||
| 		if i == 0 { | ||||
| 			i = uint64(idx + 1) | ||||
| 		} | ||||
| 		if f.Sensitive { | ||||
| 			continue | ||||
| 		} | ||||
| 		if !constantTimeStringCompare(hf.Value, f.Value) { | ||||
| 			continue | ||||
| 		} | ||||
| 		i = uint64(idx + 1) | ||||
| 		nameValueMatch = true | ||||
| 		return | ||||
| 	i, nameValueMatch = staticTable.search(f) | ||||
| 	if nameValueMatch { | ||||
| 		return i, true | ||||
| 	} | ||||
|  | ||||
| 	j, nameValueMatch := e.dynTab.search(f) | ||||
| 	j, nameValueMatch := e.dynTab.table.search(f) | ||||
| 	if nameValueMatch || (i == 0 && j != 0) { | ||||
| 		i = j + uint64(len(staticTable)) | ||||
| 		return j + uint64(staticTable.len()), nameValueMatch | ||||
| 	} | ||||
| 	return | ||||
|  | ||||
| 	return i, false | ||||
| } | ||||
|  | ||||
| // SetMaxDynamicTableSize changes the dynamic header table size to v. | ||||
|   | ||||
							
								
								
									
										104
									
								
								vendor/golang.org/x/net/http2/hpack/hpack.go
									
									
									
										generated
									
									
										vendored
									
									
								
							
							
						
						
									
										104
									
								
								vendor/golang.org/x/net/http2/hpack/hpack.go
									
									
									
										generated
									
									
										vendored
									
									
								
							| @@ -61,7 +61,7 @@ func (hf HeaderField) String() string { | ||||
| func (hf HeaderField) Size() uint32 { | ||||
| 	// http://http2.github.io/http2-spec/compression.html#rfc.section.4.1 | ||||
| 	// "The size of the dynamic table is the sum of the size of | ||||
| 	// its entries.  The size of an entry is the sum of its name's | ||||
| 	// its entries. The size of an entry is the sum of its name's | ||||
| 	// length in octets (as defined in Section 5.2), its value's | ||||
| 	// length in octets (see Section 5.2), plus 32.  The size of | ||||
| 	// an entry is calculated using the length of the name and | ||||
| @@ -102,6 +102,7 @@ func NewDecoder(maxDynamicTableSize uint32, emitFunc func(f HeaderField)) *Decod | ||||
| 		emit:        emitFunc, | ||||
| 		emitEnabled: true, | ||||
| 	} | ||||
| 	d.dynTab.table.init() | ||||
| 	d.dynTab.allowedMaxSize = maxDynamicTableSize | ||||
| 	d.dynTab.setMaxSize(maxDynamicTableSize) | ||||
| 	return d | ||||
| @@ -154,12 +155,9 @@ func (d *Decoder) SetAllowedMaxDynamicTableSize(v uint32) { | ||||
| } | ||||
|  | ||||
| type dynamicTable struct { | ||||
| 	// ents is the FIFO described at | ||||
| 	// http://http2.github.io/http2-spec/compression.html#rfc.section.2.3.2 | ||||
| 	// The newest (low index) is append at the end, and items are | ||||
| 	// evicted from the front. | ||||
| 	ents           []HeaderField | ||||
| 	size           uint32 | ||||
| 	table          headerFieldTable | ||||
| 	size           uint32 // in bytes | ||||
| 	maxSize        uint32 // current maxSize | ||||
| 	allowedMaxSize uint32 // maxSize may go up to this, inclusive | ||||
| } | ||||
| @@ -169,95 +167,45 @@ func (dt *dynamicTable) setMaxSize(v uint32) { | ||||
| 	dt.evict() | ||||
| } | ||||
|  | ||||
| // TODO: change dynamicTable to be a struct with a slice and a size int field, | ||||
| // per http://http2.github.io/http2-spec/compression.html#rfc.section.4.1: | ||||
| // | ||||
| // | ||||
| // Then make add increment the size. maybe the max size should move from Decoder to | ||||
| // dynamicTable and add should return an ok bool if there was enough space. | ||||
| // | ||||
| // Later we'll need a remove operation on dynamicTable. | ||||
|  | ||||
| func (dt *dynamicTable) add(f HeaderField) { | ||||
| 	dt.ents = append(dt.ents, f) | ||||
| 	dt.table.addEntry(f) | ||||
| 	dt.size += f.Size() | ||||
| 	dt.evict() | ||||
| } | ||||
|  | ||||
| // If we're too big, evict old stuff (front of the slice) | ||||
| // If we're too big, evict old stuff. | ||||
| func (dt *dynamicTable) evict() { | ||||
| 	base := dt.ents // keep base pointer of slice | ||||
| 	for dt.size > dt.maxSize { | ||||
| 		dt.size -= dt.ents[0].Size() | ||||
| 		dt.ents = dt.ents[1:] | ||||
| 	var n int | ||||
| 	for dt.size > dt.maxSize && n < dt.table.len() { | ||||
| 		dt.size -= dt.table.ents[n].Size() | ||||
| 		n++ | ||||
| 	} | ||||
|  | ||||
| 	// Shift slice contents down if we evicted things. | ||||
| 	if len(dt.ents) != len(base) { | ||||
| 		copy(base, dt.ents) | ||||
| 		dt.ents = base[:len(dt.ents)] | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // constantTimeStringCompare compares string a and b in a constant | ||||
| // time manner. | ||||
| func constantTimeStringCompare(a, b string) bool { | ||||
| 	if len(a) != len(b) { | ||||
| 		return false | ||||
| 	} | ||||
|  | ||||
| 	c := byte(0) | ||||
|  | ||||
| 	for i := 0; i < len(a); i++ { | ||||
| 		c |= a[i] ^ b[i] | ||||
| 	} | ||||
|  | ||||
| 	return c == 0 | ||||
| } | ||||
|  | ||||
| // Search searches f in the table. The return value i is 0 if there is | ||||
| // no name match. If there is name match or name/value match, i is the | ||||
| // index of that entry (1-based). If both name and value match, | ||||
| // nameValueMatch becomes true. | ||||
| func (dt *dynamicTable) search(f HeaderField) (i uint64, nameValueMatch bool) { | ||||
| 	l := len(dt.ents) | ||||
| 	for j := l - 1; j >= 0; j-- { | ||||
| 		ent := dt.ents[j] | ||||
| 		if !constantTimeStringCompare(ent.Name, f.Name) { | ||||
| 			continue | ||||
| 		} | ||||
| 		if i == 0 { | ||||
| 			i = uint64(l - j) | ||||
| 		} | ||||
| 		if f.Sensitive { | ||||
| 			continue | ||||
| 		} | ||||
| 		if !constantTimeStringCompare(ent.Value, f.Value) { | ||||
| 			continue | ||||
| 		} | ||||
| 		i = uint64(l - j) | ||||
| 		nameValueMatch = true | ||||
| 		return | ||||
| 	} | ||||
| 	return | ||||
| 	dt.table.evictOldest(n) | ||||
| } | ||||
|  | ||||
| func (d *Decoder) maxTableIndex() int { | ||||
| 	return len(d.dynTab.ents) + len(staticTable) | ||||
| 	// This should never overflow. RFC 7540 Section 6.5.2 limits the size of | ||||
| 	// the dynamic table to 2^32 bytes, where each entry will occupy more than | ||||
| 	// one byte. Further, the staticTable has a fixed, small length. | ||||
| 	return d.dynTab.table.len() + staticTable.len() | ||||
| } | ||||
|  | ||||
| func (d *Decoder) at(i uint64) (hf HeaderField, ok bool) { | ||||
| 	if i < 1 { | ||||
| 	// See Section 2.3.3. | ||||
| 	if i == 0 { | ||||
| 		return | ||||
| 	} | ||||
| 	if i <= uint64(staticTable.len()) { | ||||
| 		return staticTable.ents[i-1], true | ||||
| 	} | ||||
| 	if i > uint64(d.maxTableIndex()) { | ||||
| 		return | ||||
| 	} | ||||
| 	if i <= uint64(len(staticTable)) { | ||||
| 		return staticTable[i-1], true | ||||
| 	} | ||||
| 	dents := d.dynTab.ents | ||||
| 	return dents[len(dents)-(int(i)-len(staticTable))], true | ||||
| 	// In the dynamic table, newer entries have lower indices. | ||||
| 	// However, dt.ents[0] is the oldest entry. Hence, dt.ents is | ||||
| 	// the reversed dynamic table. | ||||
| 	dt := d.dynTab.table | ||||
| 	return dt.ents[dt.len()-(int(i)-staticTable.len())], true | ||||
| } | ||||
|  | ||||
| // Decode decodes an entire block. | ||||
| @@ -307,7 +255,7 @@ func (d *Decoder) Write(p []byte) (n int, err error) { | ||||
| 		err = d.parseHeaderFieldRepr() | ||||
| 		if err == errNeedMore { | ||||
| 			// Extra paranoia, making sure saveBuf won't | ||||
| 			// get too large.  All the varint and string | ||||
| 			// get too large. All the varint and string | ||||
| 			// reading code earlier should already catch | ||||
| 			// overlong things and return ErrStringLength, | ||||
| 			// but keep this as a last resort. | ||||
|   | ||||
							
								
								
									
										255
									
								
								vendor/golang.org/x/net/http2/hpack/tables.go
									
									
									
										generated
									
									
										vendored
									
									
								
							
							
						
						
									
										255
									
								
								vendor/golang.org/x/net/http2/hpack/tables.go
									
									
									
										generated
									
									
										vendored
									
									
								
							| @@ -4,73 +4,200 @@ | ||||
|  | ||||
| package hpack | ||||
|  | ||||
| func pair(name, value string) HeaderField { | ||||
| 	return HeaderField{Name: name, Value: value} | ||||
| import ( | ||||
| 	"fmt" | ||||
| ) | ||||
|  | ||||
| // headerFieldTable implements a list of HeaderFields. | ||||
| // This is used to implement the static and dynamic tables. | ||||
| type headerFieldTable struct { | ||||
| 	// For static tables, entries are never evicted. | ||||
| 	// | ||||
| 	// For dynamic tables, entries are evicted from ents[0] and added to the end. | ||||
| 	// Each entry has a unique id that starts at one and increments for each | ||||
| 	// entry that is added. This unique id is stable across evictions, meaning | ||||
| 	// it can be used as a pointer to a specific entry. As in hpack, unique ids | ||||
| 	// are 1-based. The unique id for ents[k] is k + evictCount + 1. | ||||
| 	// | ||||
| 	// Zero is not a valid unique id. | ||||
| 	// | ||||
| 	// evictCount should not overflow in any remotely practical situation. In | ||||
| 	// practice, we will have one dynamic table per HTTP/2 connection. If we | ||||
| 	// assume a very powerful server that handles 1M QPS per connection and each | ||||
| 	// request adds (then evicts) 100 entries from the table, it would still take | ||||
| 	// 2M years for evictCount to overflow. | ||||
| 	ents       []HeaderField | ||||
| 	evictCount uint64 | ||||
|  | ||||
| 	// byName maps a HeaderField name to the unique id of the newest entry with | ||||
| 	// the same name. See above for a definition of "unique id". | ||||
| 	byName map[string]uint64 | ||||
|  | ||||
| 	// byNameValue maps a HeaderField name/value pair to the unique id of the newest | ||||
| 	// entry with the same name and value. See above for a definition of "unique id". | ||||
| 	byNameValue map[pairNameValue]uint64 | ||||
| } | ||||
|  | ||||
| type pairNameValue struct { | ||||
| 	name, value string | ||||
| } | ||||
|  | ||||
| func (t *headerFieldTable) init() { | ||||
| 	t.byName = make(map[string]uint64) | ||||
| 	t.byNameValue = make(map[pairNameValue]uint64) | ||||
| } | ||||
|  | ||||
| // len reports the number of entries in the table. | ||||
| func (t *headerFieldTable) len() int { | ||||
| 	return len(t.ents) | ||||
| } | ||||
|  | ||||
| // addEntry adds a new entry. | ||||
| func (t *headerFieldTable) addEntry(f HeaderField) { | ||||
| 	id := uint64(t.len()) + t.evictCount + 1 | ||||
| 	t.byName[f.Name] = id | ||||
| 	t.byNameValue[pairNameValue{f.Name, f.Value}] = id | ||||
| 	t.ents = append(t.ents, f) | ||||
| } | ||||
|  | ||||
| // evictOldest evicts the n oldest entries in the table. | ||||
| func (t *headerFieldTable) evictOldest(n int) { | ||||
| 	if n > t.len() { | ||||
| 		panic(fmt.Sprintf("evictOldest(%v) on table with %v entries", n, t.len())) | ||||
| 	} | ||||
| 	for k := 0; k < n; k++ { | ||||
| 		f := t.ents[k] | ||||
| 		id := t.evictCount + uint64(k) + 1 | ||||
| 		if t.byName[f.Name] == id { | ||||
| 			delete(t.byName, f.Name) | ||||
| 		} | ||||
| 		if p := (pairNameValue{f.Name, f.Value}); t.byNameValue[p] == id { | ||||
| 			delete(t.byNameValue, p) | ||||
| 		} | ||||
| 	} | ||||
| 	copy(t.ents, t.ents[n:]) | ||||
| 	for k := t.len() - n; k < t.len(); k++ { | ||||
| 		t.ents[k] = HeaderField{} // so strings can be garbage collected | ||||
| 	} | ||||
| 	t.ents = t.ents[:t.len()-n] | ||||
| 	if t.evictCount+uint64(n) < t.evictCount { | ||||
| 		panic("evictCount overflow") | ||||
| 	} | ||||
| 	t.evictCount += uint64(n) | ||||
| } | ||||
|  | ||||
| // search finds f in the table. If there is no match, i is 0. | ||||
| // If both name and value match, i is the matched index and nameValueMatch | ||||
| // becomes true. If only name matches, i points to that index and | ||||
| // nameValueMatch becomes false. | ||||
| // | ||||
| // The returned index is a 1-based HPACK index. For dynamic tables, HPACK says | ||||
| // that index 1 should be the newest entry, but t.ents[0] is the oldest entry, | ||||
| // meaning t.ents is reversed for dynamic tables. Hence, when t is a dynamic | ||||
| // table, the return value i actually refers to the entry t.ents[t.len()-i]. | ||||
| // | ||||
| // All tables are assumed to be a dynamic tables except for the global | ||||
| // staticTable pointer. | ||||
| // | ||||
| // See Section 2.3.3. | ||||
| func (t *headerFieldTable) search(f HeaderField) (i uint64, nameValueMatch bool) { | ||||
| 	if !f.Sensitive { | ||||
| 		if id := t.byNameValue[pairNameValue{f.Name, f.Value}]; id != 0 { | ||||
| 			return t.idToIndex(id), true | ||||
| 		} | ||||
| 	} | ||||
| 	if id := t.byName[f.Name]; id != 0 { | ||||
| 		return t.idToIndex(id), false | ||||
| 	} | ||||
| 	return 0, false | ||||
| } | ||||
|  | ||||
| // idToIndex converts a unique id to an HPACK index. | ||||
| // See Section 2.3.3. | ||||
| func (t *headerFieldTable) idToIndex(id uint64) uint64 { | ||||
| 	if id <= t.evictCount { | ||||
| 		panic(fmt.Sprintf("id (%v) <= evictCount (%v)", id, t.evictCount)) | ||||
| 	} | ||||
| 	k := id - t.evictCount - 1 // convert id to an index t.ents[k] | ||||
| 	if t != staticTable { | ||||
| 		return uint64(t.len()) - k // dynamic table | ||||
| 	} | ||||
| 	return k + 1 | ||||
| } | ||||
|  | ||||
| // http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-07#appendix-B | ||||
| var staticTable = [...]HeaderField{ | ||||
| 	pair(":authority", ""), // index 1 (1-based) | ||||
| 	pair(":method", "GET"), | ||||
| 	pair(":method", "POST"), | ||||
| 	pair(":path", "/"), | ||||
| 	pair(":path", "/index.html"), | ||||
| 	pair(":scheme", "http"), | ||||
| 	pair(":scheme", "https"), | ||||
| 	pair(":status", "200"), | ||||
| 	pair(":status", "204"), | ||||
| 	pair(":status", "206"), | ||||
| 	pair(":status", "304"), | ||||
| 	pair(":status", "400"), | ||||
| 	pair(":status", "404"), | ||||
| 	pair(":status", "500"), | ||||
| 	pair("accept-charset", ""), | ||||
| 	pair("accept-encoding", "gzip, deflate"), | ||||
| 	pair("accept-language", ""), | ||||
| 	pair("accept-ranges", ""), | ||||
| 	pair("accept", ""), | ||||
| 	pair("access-control-allow-origin", ""), | ||||
| 	pair("age", ""), | ||||
| 	pair("allow", ""), | ||||
| 	pair("authorization", ""), | ||||
| 	pair("cache-control", ""), | ||||
| 	pair("content-disposition", ""), | ||||
| 	pair("content-encoding", ""), | ||||
| 	pair("content-language", ""), | ||||
| 	pair("content-length", ""), | ||||
| 	pair("content-location", ""), | ||||
| 	pair("content-range", ""), | ||||
| 	pair("content-type", ""), | ||||
| 	pair("cookie", ""), | ||||
| 	pair("date", ""), | ||||
| 	pair("etag", ""), | ||||
| 	pair("expect", ""), | ||||
| 	pair("expires", ""), | ||||
| 	pair("from", ""), | ||||
| 	pair("host", ""), | ||||
| 	pair("if-match", ""), | ||||
| 	pair("if-modified-since", ""), | ||||
| 	pair("if-none-match", ""), | ||||
| 	pair("if-range", ""), | ||||
| 	pair("if-unmodified-since", ""), | ||||
| 	pair("last-modified", ""), | ||||
| 	pair("link", ""), | ||||
| 	pair("location", ""), | ||||
| 	pair("max-forwards", ""), | ||||
| 	pair("proxy-authenticate", ""), | ||||
| 	pair("proxy-authorization", ""), | ||||
| 	pair("range", ""), | ||||
| 	pair("referer", ""), | ||||
| 	pair("refresh", ""), | ||||
| 	pair("retry-after", ""), | ||||
| 	pair("server", ""), | ||||
| 	pair("set-cookie", ""), | ||||
| 	pair("strict-transport-security", ""), | ||||
| 	pair("transfer-encoding", ""), | ||||
| 	pair("user-agent", ""), | ||||
| 	pair("vary", ""), | ||||
| 	pair("via", ""), | ||||
| 	pair("www-authenticate", ""), | ||||
| var staticTable = newStaticTable() | ||||
| var staticTableEntries = [...]HeaderField{ | ||||
| 	{Name: ":authority"}, | ||||
| 	{Name: ":method", Value: "GET"}, | ||||
| 	{Name: ":method", Value: "POST"}, | ||||
| 	{Name: ":path", Value: "/"}, | ||||
| 	{Name: ":path", Value: "/index.html"}, | ||||
| 	{Name: ":scheme", Value: "http"}, | ||||
| 	{Name: ":scheme", Value: "https"}, | ||||
| 	{Name: ":status", Value: "200"}, | ||||
| 	{Name: ":status", Value: "204"}, | ||||
| 	{Name: ":status", Value: "206"}, | ||||
| 	{Name: ":status", Value: "304"}, | ||||
| 	{Name: ":status", Value: "400"}, | ||||
| 	{Name: ":status", Value: "404"}, | ||||
| 	{Name: ":status", Value: "500"}, | ||||
| 	{Name: "accept-charset"}, | ||||
| 	{Name: "accept-encoding", Value: "gzip, deflate"}, | ||||
| 	{Name: "accept-language"}, | ||||
| 	{Name: "accept-ranges"}, | ||||
| 	{Name: "accept"}, | ||||
| 	{Name: "access-control-allow-origin"}, | ||||
| 	{Name: "age"}, | ||||
| 	{Name: "allow"}, | ||||
| 	{Name: "authorization"}, | ||||
| 	{Name: "cache-control"}, | ||||
| 	{Name: "content-disposition"}, | ||||
| 	{Name: "content-encoding"}, | ||||
| 	{Name: "content-language"}, | ||||
| 	{Name: "content-length"}, | ||||
| 	{Name: "content-location"}, | ||||
| 	{Name: "content-range"}, | ||||
| 	{Name: "content-type"}, | ||||
| 	{Name: "cookie"}, | ||||
| 	{Name: "date"}, | ||||
| 	{Name: "etag"}, | ||||
| 	{Name: "expect"}, | ||||
| 	{Name: "expires"}, | ||||
| 	{Name: "from"}, | ||||
| 	{Name: "host"}, | ||||
| 	{Name: "if-match"}, | ||||
| 	{Name: "if-modified-since"}, | ||||
| 	{Name: "if-none-match"}, | ||||
| 	{Name: "if-range"}, | ||||
| 	{Name: "if-unmodified-since"}, | ||||
| 	{Name: "last-modified"}, | ||||
| 	{Name: "link"}, | ||||
| 	{Name: "location"}, | ||||
| 	{Name: "max-forwards"}, | ||||
| 	{Name: "proxy-authenticate"}, | ||||
| 	{Name: "proxy-authorization"}, | ||||
| 	{Name: "range"}, | ||||
| 	{Name: "referer"}, | ||||
| 	{Name: "refresh"}, | ||||
| 	{Name: "retry-after"}, | ||||
| 	{Name: "server"}, | ||||
| 	{Name: "set-cookie"}, | ||||
| 	{Name: "strict-transport-security"}, | ||||
| 	{Name: "transfer-encoding"}, | ||||
| 	{Name: "user-agent"}, | ||||
| 	{Name: "vary"}, | ||||
| 	{Name: "via"}, | ||||
| 	{Name: "www-authenticate"}, | ||||
| } | ||||
|  | ||||
| func newStaticTable() *headerFieldTable { | ||||
| 	t := &headerFieldTable{} | ||||
| 	t.init() | ||||
| 	for _, e := range staticTableEntries[:] { | ||||
| 		t.addEntry(e) | ||||
| 	} | ||||
| 	return t | ||||
| } | ||||
|  | ||||
| var huffmanCodes = [256]uint32{ | ||||
|   | ||||
							
								
								
									
										36
									
								
								vendor/golang.org/x/net/http2/http2.go
									
									
									
										generated
									
									
										vendored
									
									
								
							
							
						
						
									
										36
									
								
								vendor/golang.org/x/net/http2/http2.go
									
									
									
										generated
									
									
										vendored
									
									
								
							| @@ -36,6 +36,7 @@ var ( | ||||
| 	VerboseLogs    bool | ||||
| 	logFrameWrites bool | ||||
| 	logFrameReads  bool | ||||
| 	inTests        bool | ||||
| ) | ||||
|  | ||||
| func init() { | ||||
| @@ -77,13 +78,23 @@ var ( | ||||
|  | ||||
| type streamState int | ||||
|  | ||||
| // HTTP/2 stream states. | ||||
| // | ||||
| // See http://tools.ietf.org/html/rfc7540#section-5.1. | ||||
| // | ||||
| // For simplicity, the server code merges "reserved (local)" into | ||||
| // "half-closed (remote)". This is one less state transition to track. | ||||
| // The only downside is that we send PUSH_PROMISEs slightly less | ||||
| // liberally than allowable. More discussion here: | ||||
| // https://lists.w3.org/Archives/Public/ietf-http-wg/2016JulSep/0599.html | ||||
| // | ||||
| // "reserved (remote)" is omitted since the client code does not | ||||
| // support server push. | ||||
| const ( | ||||
| 	stateIdle streamState = iota | ||||
| 	stateOpen | ||||
| 	stateHalfClosedLocal | ||||
| 	stateHalfClosedRemote | ||||
| 	stateResvLocal | ||||
| 	stateResvRemote | ||||
| 	stateClosed | ||||
| ) | ||||
|  | ||||
| @@ -92,8 +103,6 @@ var stateName = [...]string{ | ||||
| 	stateOpen:             "Open", | ||||
| 	stateHalfClosedLocal:  "HalfClosedLocal", | ||||
| 	stateHalfClosedRemote: "HalfClosedRemote", | ||||
| 	stateResvLocal:        "ResvLocal", | ||||
| 	stateResvRemote:       "ResvRemote", | ||||
| 	stateClosed:           "Closed", | ||||
| } | ||||
|  | ||||
| @@ -253,14 +262,27 @@ func newBufferedWriter(w io.Writer) *bufferedWriter { | ||||
| 	return &bufferedWriter{w: w} | ||||
| } | ||||
|  | ||||
| // bufWriterPoolBufferSize is the size of bufio.Writer's | ||||
| // buffers created using bufWriterPool. | ||||
| // | ||||
| // TODO: pick a less arbitrary value? this is a bit under | ||||
| // (3 x typical 1500 byte MTU) at least. Other than that, | ||||
| // not much thought went into it. | ||||
| const bufWriterPoolBufferSize = 4 << 10 | ||||
|  | ||||
| var bufWriterPool = sync.Pool{ | ||||
| 	New: func() interface{} { | ||||
| 		// TODO: pick something better? this is a bit under | ||||
| 		// (3 x typical 1500 byte MTU) at least. | ||||
| 		return bufio.NewWriterSize(nil, 4<<10) | ||||
| 		return bufio.NewWriterSize(nil, bufWriterPoolBufferSize) | ||||
| 	}, | ||||
| } | ||||
|  | ||||
| func (w *bufferedWriter) Available() int { | ||||
| 	if w.bw == nil { | ||||
| 		return bufWriterPoolBufferSize | ||||
| 	} | ||||
| 	return w.bw.Available() | ||||
| } | ||||
|  | ||||
| func (w *bufferedWriter) Write(p []byte) (n int, err error) { | ||||
| 	if w.bw == nil { | ||||
| 		bw := bufWriterPool.Get().(*bufio.Writer) | ||||
|   | ||||
							
								
								
									
										25
									
								
								vendor/golang.org/x/net/http2/not_go16.go
									
									
									
										generated
									
									
										vendored
									
									
								
							
							
						
						
									
										25
									
								
								vendor/golang.org/x/net/http2/not_go16.go
									
									
									
										generated
									
									
										vendored
									
									
								
							| @@ -7,7 +7,6 @@ | ||||
| package http2 | ||||
|  | ||||
| import ( | ||||
| 	"crypto/tls" | ||||
| 	"net/http" | ||||
| 	"time" | ||||
| ) | ||||
| @@ -20,27 +19,3 @@ func transportExpectContinueTimeout(t1 *http.Transport) time.Duration { | ||||
| 	return 0 | ||||
|  | ||||
| } | ||||
|  | ||||
| // isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec. | ||||
| func isBadCipher(cipher uint16) bool { | ||||
| 	switch cipher { | ||||
| 	case tls.TLS_RSA_WITH_RC4_128_SHA, | ||||
| 		tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA, | ||||
| 		tls.TLS_RSA_WITH_AES_128_CBC_SHA, | ||||
| 		tls.TLS_RSA_WITH_AES_256_CBC_SHA, | ||||
| 		tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, | ||||
| 		tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, | ||||
| 		tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, | ||||
| 		tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA, | ||||
| 		tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, | ||||
| 		tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, | ||||
| 		tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: | ||||
| 		// Reject cipher suites from Appendix A. | ||||
| 		// "This list includes those cipher suites that do not | ||||
| 		// offer an ephemeral key exchange and those that are | ||||
| 		// based on the TLS null, stream or block cipher type" | ||||
| 		return true | ||||
| 	default: | ||||
| 		return false | ||||
| 	} | ||||
| } | ||||
|   | ||||
							
								
								
									
										27
									
								
								vendor/golang.org/x/net/http2/not_go18.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										27
									
								
								vendor/golang.org/x/net/http2/not_go18.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,27 @@ | ||||
| // Copyright 2016 The Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| // +build !go1.8 | ||||
|  | ||||
| package http2 | ||||
|  | ||||
| import ( | ||||
| 	"io" | ||||
| 	"net/http" | ||||
| ) | ||||
|  | ||||
| func configureServer18(h1 *http.Server, h2 *Server) error { | ||||
| 	// No IdleTimeout to sync prior to Go 1.8. | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func shouldLogPanic(panicValue interface{}) bool { | ||||
| 	return panicValue != nil | ||||
| } | ||||
|  | ||||
| func reqGetBody(req *http.Request) func() (io.ReadCloser, error) { | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func reqBodyIsNoBody(io.ReadCloser) bool { return false } | ||||
							
								
								
									
										16
									
								
								vendor/golang.org/x/net/http2/not_go19.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										16
									
								
								vendor/golang.org/x/net/http2/not_go19.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,16 @@ | ||||
| // Copyright 2016 The Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| // +build !go1.9 | ||||
|  | ||||
| package http2 | ||||
|  | ||||
| import ( | ||||
| 	"net/http" | ||||
| ) | ||||
|  | ||||
| func configureServer19(s *http.Server, conf *Server) error { | ||||
| 	// not supported prior to go1.9 | ||||
| 	return nil | ||||
| } | ||||
							
								
								
									
										16
									
								
								vendor/golang.org/x/net/http2/pipe.go
									
									
									
										generated
									
									
										vendored
									
									
								
							
							
						
						
									
										16
									
								
								vendor/golang.org/x/net/http2/pipe.go
									
									
									
										generated
									
									
										vendored
									
									
								
							| @@ -10,13 +10,13 @@ import ( | ||||
| 	"sync" | ||||
| ) | ||||
|  | ||||
| // pipe is a goroutine-safe io.Reader/io.Writer pair.  It's like | ||||
| // pipe is a goroutine-safe io.Reader/io.Writer pair. It's like | ||||
| // io.Pipe except there are no PipeReader/PipeWriter halves, and the | ||||
| // underlying buffer is an interface. (io.Pipe is always unbuffered) | ||||
| type pipe struct { | ||||
| 	mu       sync.Mutex | ||||
| 	c        sync.Cond // c.L lazily initialized to &p.mu | ||||
| 	b        pipeBuffer | ||||
| 	c        sync.Cond     // c.L lazily initialized to &p.mu | ||||
| 	b        pipeBuffer    // nil when done reading | ||||
| 	err      error         // read error once empty. non-nil means closed. | ||||
| 	breakErr error         // immediate read error (caller doesn't see rest of b) | ||||
| 	donec    chan struct{} // closed on error | ||||
| @@ -32,6 +32,9 @@ type pipeBuffer interface { | ||||
| func (p *pipe) Len() int { | ||||
| 	p.mu.Lock() | ||||
| 	defer p.mu.Unlock() | ||||
| 	if p.b == nil { | ||||
| 		return 0 | ||||
| 	} | ||||
| 	return p.b.Len() | ||||
| } | ||||
|  | ||||
| @@ -55,6 +58,7 @@ func (p *pipe) Read(d []byte) (n int, err error) { | ||||
| 				p.readFn()     // e.g. copy trailers | ||||
| 				p.readFn = nil // not sticky like p.err | ||||
| 			} | ||||
| 			p.b = nil | ||||
| 			return 0, p.err | ||||
| 		} | ||||
| 		p.c.Wait() | ||||
| @@ -75,6 +79,9 @@ func (p *pipe) Write(d []byte) (n int, err error) { | ||||
| 	if p.err != nil { | ||||
| 		return 0, errClosedPipeWrite | ||||
| 	} | ||||
| 	if p.breakErr != nil { | ||||
| 		return len(d), nil // discard when there is no reader | ||||
| 	} | ||||
| 	return p.b.Write(d) | ||||
| } | ||||
|  | ||||
| @@ -109,6 +116,9 @@ func (p *pipe) closeWithError(dst *error, err error, fn func()) { | ||||
| 		return | ||||
| 	} | ||||
| 	p.readFn = fn | ||||
| 	if dst == &p.breakErr { | ||||
| 		p.b = nil | ||||
| 	} | ||||
| 	*dst = err | ||||
| 	p.closeDoneLocked() | ||||
| } | ||||
|   | ||||
							
								
								
									
										1297
									
								
								vendor/golang.org/x/net/http2/server.go
									
									
									
										generated
									
									
										vendored
									
									
								
							
							
						
						
									
										1297
									
								
								vendor/golang.org/x/net/http2/server.go
									
									
									
										generated
									
									
										vendored
									
									
								
							
										
											
												File diff suppressed because it is too large
												Load Diff
											
										
									
								
							
							
								
								
									
										145
									
								
								vendor/golang.org/x/net/http2/transport.go
									
									
									
										generated
									
									
										vendored
									
									
								
							
							
						
						
									
										145
									
								
								vendor/golang.org/x/net/http2/transport.go
									
									
									
										generated
									
									
										vendored
									
									
								
							| @@ -191,6 +191,7 @@ type clientStream struct { | ||||
| 	ID            uint32 | ||||
| 	resc          chan resAndError | ||||
| 	bufPipe       pipe // buffered pipe with the flow-controlled response payload | ||||
| 	startedWrite  bool // started request body write; guarded by cc.mu | ||||
| 	requestedGzip bool | ||||
| 	on100         func() // optional code to run if get a 100 continue response | ||||
|  | ||||
| @@ -199,6 +200,7 @@ type clientStream struct { | ||||
| 	bytesRemain int64 // -1 means unknown; owned by transportResponseBody.Read | ||||
| 	readErr     error // sticky read error; owned by transportResponseBody.Read | ||||
| 	stopReqBody error // if non-nil, stop writing req body; guarded by cc.mu | ||||
| 	didReset    bool  // whether we sent a RST_STREAM to the server; guarded by cc.mu | ||||
|  | ||||
| 	peerReset chan struct{} // closed on peer reset | ||||
| 	resetErr  error         // populated before peerReset is closed | ||||
| @@ -226,15 +228,26 @@ func (cs *clientStream) awaitRequestCancel(req *http.Request) { | ||||
| 	} | ||||
| 	select { | ||||
| 	case <-req.Cancel: | ||||
| 		cs.cancelStream() | ||||
| 		cs.bufPipe.CloseWithError(errRequestCanceled) | ||||
| 		cs.cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) | ||||
| 	case <-ctx.Done(): | ||||
| 		cs.cancelStream() | ||||
| 		cs.bufPipe.CloseWithError(ctx.Err()) | ||||
| 		cs.cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) | ||||
| 	case <-cs.done: | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (cs *clientStream) cancelStream() { | ||||
| 	cs.cc.mu.Lock() | ||||
| 	didReset := cs.didReset | ||||
| 	cs.didReset = true | ||||
| 	cs.cc.mu.Unlock() | ||||
|  | ||||
| 	if !didReset { | ||||
| 		cs.cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // checkResetOrDone reports any error sent in a RST_STREAM frame by the | ||||
| // server, or errStreamClosed if the stream is complete. | ||||
| func (cs *clientStream) checkResetOrDone() error { | ||||
| @@ -302,6 +315,10 @@ func authorityAddr(scheme string, authority string) (addr string) { | ||||
| 	if a, err := idna.ToASCII(host); err == nil { | ||||
| 		host = a | ||||
| 	} | ||||
| 	// IPv6 address literal, without a port: | ||||
| 	if strings.HasPrefix(host, "[") && strings.HasSuffix(host, "]") { | ||||
| 		return host + ":" + port | ||||
| 	} | ||||
| 	return net.JoinHostPort(host, port) | ||||
| } | ||||
|  | ||||
| @@ -320,8 +337,10 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res | ||||
| 		} | ||||
| 		traceGotConn(req, cc) | ||||
| 		res, err := cc.RoundTrip(req) | ||||
| 		if shouldRetryRequest(req, err) { | ||||
| 			continue | ||||
| 		if err != nil { | ||||
| 			if req, err = shouldRetryRequest(req, err); err == nil { | ||||
| 				continue | ||||
| 			} | ||||
| 		} | ||||
| 		if err != nil { | ||||
| 			t.vlogf("RoundTrip failure: %v", err) | ||||
| @@ -343,12 +362,41 @@ func (t *Transport) CloseIdleConnections() { | ||||
| var ( | ||||
| 	errClientConnClosed   = errors.New("http2: client conn is closed") | ||||
| 	errClientConnUnusable = errors.New("http2: client conn not usable") | ||||
|  | ||||
| 	errClientConnGotGoAway                 = errors.New("http2: Transport received Server's graceful shutdown GOAWAY") | ||||
| 	errClientConnGotGoAwayAfterSomeReqBody = errors.New("http2: Transport received Server's graceful shutdown GOAWAY; some request body already written") | ||||
| ) | ||||
|  | ||||
| func shouldRetryRequest(req *http.Request, err error) bool { | ||||
| 	// TODO: retry GET requests (no bodies) more aggressively, if shutdown | ||||
| 	// before response. | ||||
| 	return err == errClientConnUnusable | ||||
| // shouldRetryRequest is called by RoundTrip when a request fails to get | ||||
| // response headers. It is always called with a non-nil error. | ||||
| // It returns either a request to retry (either the same request, or a | ||||
| // modified clone), or an error if the request can't be replayed. | ||||
| func shouldRetryRequest(req *http.Request, err error) (*http.Request, error) { | ||||
| 	switch err { | ||||
| 	default: | ||||
| 		return nil, err | ||||
| 	case errClientConnUnusable, errClientConnGotGoAway: | ||||
| 		return req, nil | ||||
| 	case errClientConnGotGoAwayAfterSomeReqBody: | ||||
| 		// If the Body is nil (or http.NoBody), it's safe to reuse | ||||
| 		// this request and its Body. | ||||
| 		if req.Body == nil || reqBodyIsNoBody(req.Body) { | ||||
| 			return req, nil | ||||
| 		} | ||||
| 		// Otherwise we depend on the Request having its GetBody | ||||
| 		// func defined. | ||||
| 		getBody := reqGetBody(req) // Go 1.8: getBody = req.GetBody | ||||
| 		if getBody == nil { | ||||
| 			return nil, errors.New("http2: Transport: peer server initiated graceful shutdown after some of Request.Body was written; define Request.GetBody to avoid this error") | ||||
| 		} | ||||
| 		body, err := getBody() | ||||
| 		if err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
| 		newReq := *req | ||||
| 		newReq.Body = body | ||||
| 		return &newReq, nil | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (t *Transport) dialClientConn(addr string, singleUse bool) (*ClientConn, error) { | ||||
| @@ -501,6 +549,15 @@ func (cc *ClientConn) setGoAway(f *GoAwayFrame) { | ||||
| 	if old != nil && old.ErrCode != ErrCodeNo { | ||||
| 		cc.goAway.ErrCode = old.ErrCode | ||||
| 	} | ||||
| 	last := f.LastStreamID | ||||
| 	for streamID, cs := range cc.streams { | ||||
| 		if streamID > last { | ||||
| 			select { | ||||
| 			case cs.resc <- resAndError{err: errClientConnGotGoAway}: | ||||
| 			default: | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (cc *ClientConn) CanTakeNewRequest() bool { | ||||
| @@ -518,7 +575,7 @@ func (cc *ClientConn) canTakeNewRequestLocked() bool { | ||||
| 		cc.nextStreamID < math.MaxInt32 | ||||
| } | ||||
|  | ||||
| // onIdleTimeout is called from a time.AfterFunc goroutine.  It will | ||||
| // onIdleTimeout is called from a time.AfterFunc goroutine. It will | ||||
| // only be called when we're idle, but because we're coming from a new | ||||
| // goroutine, there could be a new request coming in at the same time, | ||||
| // so this simply calls the synchronized closeIfIdle to shut down this | ||||
| @@ -601,8 +658,6 @@ func commaSeparatedTrailers(req *http.Request) (string, error) { | ||||
| 	} | ||||
| 	if len(keys) > 0 { | ||||
| 		sort.Strings(keys) | ||||
| 		// TODO: could do better allocation-wise here, but trailers are rare, | ||||
| 		// so being lazy for now. | ||||
| 		return strings.Join(keys, ","), nil | ||||
| 	} | ||||
| 	return "", nil | ||||
| @@ -635,39 +690,17 @@ func checkConnHeaders(req *http.Request) error { | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func bodyAndLength(req *http.Request) (body io.Reader, contentLen int64) { | ||||
| 	body = req.Body | ||||
| 	if body == nil { | ||||
| 		return nil, 0 | ||||
| // actualContentLength returns a sanitized version of | ||||
| // req.ContentLength, where 0 actually means zero (not unknown) and -1 | ||||
| // means unknown. | ||||
| func actualContentLength(req *http.Request) int64 { | ||||
| 	if req.Body == nil { | ||||
| 		return 0 | ||||
| 	} | ||||
| 	if req.ContentLength != 0 { | ||||
| 		return req.Body, req.ContentLength | ||||
| 		return req.ContentLength | ||||
| 	} | ||||
| 	// Don't try to sniff the size if they're doing an expect | ||||
| 	// request (Issue 16002): | ||||
| 	if req.Header.Get("Expect") == "100-continue" { | ||||
| 		return req.Body, -1 | ||||
| 	} | ||||
|  | ||||
| 	// We have a body but a zero content length. Test to see if | ||||
| 	// it's actually zero or just unset. | ||||
| 	var buf [1]byte | ||||
| 	n, rerr := body.Read(buf[:]) | ||||
| 	if rerr != nil && rerr != io.EOF { | ||||
| 		return errorReader{rerr}, -1 | ||||
| 	} | ||||
| 	if n == 1 { | ||||
| 		// Oh, guess there is data in this Body Reader after all. | ||||
| 		// The ContentLength field just wasn't set. | ||||
| 		// Stitch the Body back together again, re-attaching our | ||||
| 		// consumed byte. | ||||
| 		if rerr == io.EOF { | ||||
| 			return bytes.NewReader(buf[:]), 1 | ||||
| 		} | ||||
| 		return io.MultiReader(bytes.NewReader(buf[:]), body), -1 | ||||
| 	} | ||||
| 	// Body is actually zero bytes. | ||||
| 	return nil, 0 | ||||
| 	return -1 | ||||
| } | ||||
|  | ||||
| func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { | ||||
| @@ -691,8 +724,9 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { | ||||
| 		return nil, errClientConnUnusable | ||||
| 	} | ||||
|  | ||||
| 	body, contentLen := bodyAndLength(req) | ||||
| 	body := req.Body | ||||
| 	hasBody := body != nil | ||||
| 	contentLen := actualContentLength(req) | ||||
|  | ||||
| 	// TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere? | ||||
| 	var requestedGzip bool | ||||
| @@ -775,13 +809,20 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { | ||||
| 			// 2xx, however, then assume the server DOES potentially | ||||
| 			// want our body (e.g. full-duplex streaming: | ||||
| 			// golang.org/issue/13444). If it turns out the server | ||||
| 			// doesn't, they'll RST_STREAM us soon enough.  This is a | ||||
| 			// heuristic to avoid adding knobs to Transport.  Hopefully | ||||
| 			// doesn't, they'll RST_STREAM us soon enough. This is a | ||||
| 			// heuristic to avoid adding knobs to Transport. Hopefully | ||||
| 			// we can keep it. | ||||
| 			bodyWriter.cancel() | ||||
| 			cs.abortRequestBodyWrite(errStopReqBodyWrite) | ||||
| 		} | ||||
| 		if re.err != nil { | ||||
| 			if re.err == errClientConnGotGoAway { | ||||
| 				cc.mu.Lock() | ||||
| 				if cs.startedWrite { | ||||
| 					re.err = errClientConnGotGoAwayAfterSomeReqBody | ||||
| 				} | ||||
| 				cc.mu.Unlock() | ||||
| 			} | ||||
| 			cc.forgetStreamID(cs.ID) | ||||
| 			return nil, re.err | ||||
| 		} | ||||
| @@ -1487,8 +1528,7 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra | ||||
| 		return res, nil | ||||
| 	} | ||||
|  | ||||
| 	buf := new(bytes.Buffer) // TODO(bradfitz): recycle this garbage | ||||
| 	cs.bufPipe = pipe{b: buf} | ||||
| 	cs.bufPipe = pipe{b: &dataBuffer{expected: res.ContentLength}} | ||||
| 	cs.bytesRemain = res.ContentLength | ||||
| 	res.Body = transportResponseBody{cs} | ||||
| 	go cs.awaitRequestCancel(cs.req) | ||||
| @@ -1615,6 +1655,7 @@ func (b transportResponseBody) Close() error { | ||||
| 		cc.wmu.Lock() | ||||
| 		if !serverSentStreamEnd { | ||||
| 			cc.fr.WriteRSTStream(cs.ID, ErrCodeCancel) | ||||
| 			cs.didReset = true | ||||
| 		} | ||||
| 		// Return connection-level flow control. | ||||
| 		if unread > 0 { | ||||
| @@ -1662,12 +1703,6 @@ func (rl *clientConnReadLoop) processData(f *DataFrame) error { | ||||
| 		return nil | ||||
| 	} | ||||
| 	if f.Length > 0 { | ||||
| 		if len(data) > 0 && cs.bufPipe.b == nil { | ||||
| 			// Data frame after it's already closed? | ||||
| 			cc.logf("http2: Transport received DATA frame for closed stream; closing connection") | ||||
| 			return ConnectionError(ErrCodeProtocol) | ||||
| 		} | ||||
|  | ||||
| 		// Check connection-level flow control. | ||||
| 		cc.mu.Lock() | ||||
| 		if cs.inflow.available() >= int32(f.Length) { | ||||
| @@ -1687,9 +1722,10 @@ func (rl *clientConnReadLoop) processData(f *DataFrame) error { | ||||
| 			cc.bw.Flush() | ||||
| 			cc.wmu.Unlock() | ||||
| 		} | ||||
| 		didReset := cs.didReset | ||||
| 		cc.mu.Unlock() | ||||
|  | ||||
| 		if len(data) > 0 { | ||||
| 		if len(data) > 0 && !didReset { | ||||
| 			if _, err := cs.bufPipe.Write(data); err != nil { | ||||
| 				rl.endStreamError(cs, err) | ||||
| 				return err | ||||
| @@ -2021,6 +2057,9 @@ func (t *Transport) getBodyWriterState(cs *clientStream, body io.Reader) (s body | ||||
| 	resc := make(chan error, 1) | ||||
| 	s.resc = resc | ||||
| 	s.fn = func() { | ||||
| 		cs.cc.mu.Lock() | ||||
| 		cs.startedWrite = true | ||||
| 		cs.cc.mu.Unlock() | ||||
| 		resc <- cs.writeRequestBody(body, cs.req.Body) | ||||
| 	} | ||||
| 	s.delay = t.expectContinueTimeout() | ||||
|   | ||||
							
								
								
									
										176
									
								
								vendor/golang.org/x/net/http2/write.go
									
									
									
										generated
									
									
										vendored
									
									
								
							
							
						
						
									
										176
									
								
								vendor/golang.org/x/net/http2/write.go
									
									
									
										generated
									
									
										vendored
									
									
								
							| @@ -9,6 +9,7 @@ import ( | ||||
| 	"fmt" | ||||
| 	"log" | ||||
| 	"net/http" | ||||
| 	"net/url" | ||||
| 	"time" | ||||
|  | ||||
| 	"golang.org/x/net/http2/hpack" | ||||
| @@ -18,6 +19,11 @@ import ( | ||||
| // writeFramer is implemented by any type that is used to write frames. | ||||
| type writeFramer interface { | ||||
| 	writeFrame(writeContext) error | ||||
|  | ||||
| 	// staysWithinBuffer reports whether this writer promises that | ||||
| 	// it will only write less than or equal to size bytes, and it | ||||
| 	// won't Flush the write context. | ||||
| 	staysWithinBuffer(size int) bool | ||||
| } | ||||
|  | ||||
| // writeContext is the interface needed by the various frame writer | ||||
| @@ -39,9 +45,10 @@ type writeContext interface { | ||||
| 	HeaderEncoder() (*hpack.Encoder, *bytes.Buffer) | ||||
| } | ||||
|  | ||||
| // endsStream reports whether the given frame writer w will locally | ||||
| // close the stream. | ||||
| func endsStream(w writeFramer) bool { | ||||
| // writeEndsStream reports whether w writes a frame that will transition | ||||
| // the stream to a half-closed local state. This returns false for RST_STREAM, | ||||
| // which closes the entire stream (not just the local half). | ||||
| func writeEndsStream(w writeFramer) bool { | ||||
| 	switch v := w.(type) { | ||||
| 	case *writeData: | ||||
| 		return v.endStream | ||||
| @@ -51,7 +58,7 @@ func endsStream(w writeFramer) bool { | ||||
| 		// This can only happen if the caller reuses w after it's | ||||
| 		// been intentionally nil'ed out to prevent use. Keep this | ||||
| 		// here to catch future refactoring breaking it. | ||||
| 		panic("endsStream called on nil writeFramer") | ||||
| 		panic("writeEndsStream called on nil writeFramer") | ||||
| 	} | ||||
| 	return false | ||||
| } | ||||
| @@ -62,8 +69,16 @@ func (flushFrameWriter) writeFrame(ctx writeContext) error { | ||||
| 	return ctx.Flush() | ||||
| } | ||||
|  | ||||
| func (flushFrameWriter) staysWithinBuffer(max int) bool { return false } | ||||
|  | ||||
| type writeSettings []Setting | ||||
|  | ||||
| func (s writeSettings) staysWithinBuffer(max int) bool { | ||||
| 	const settingSize = 6 // uint16 + uint32 | ||||
| 	return frameHeaderLen+settingSize*len(s) <= max | ||||
|  | ||||
| } | ||||
|  | ||||
| func (s writeSettings) writeFrame(ctx writeContext) error { | ||||
| 	return ctx.Framer().WriteSettings([]Setting(s)...) | ||||
| } | ||||
| @@ -83,6 +98,8 @@ func (p *writeGoAway) writeFrame(ctx writeContext) error { | ||||
| 	return err | ||||
| } | ||||
|  | ||||
| func (*writeGoAway) staysWithinBuffer(max int) bool { return false } // flushes | ||||
|  | ||||
| type writeData struct { | ||||
| 	streamID  uint32 | ||||
| 	p         []byte | ||||
| @@ -97,6 +114,10 @@ func (w *writeData) writeFrame(ctx writeContext) error { | ||||
| 	return ctx.Framer().WriteData(w.streamID, w.endStream, w.p) | ||||
| } | ||||
|  | ||||
| func (w *writeData) staysWithinBuffer(max int) bool { | ||||
| 	return frameHeaderLen+len(w.p) <= max | ||||
| } | ||||
|  | ||||
| // handlerPanicRST is the message sent from handler goroutines when | ||||
| // the handler panics. | ||||
| type handlerPanicRST struct { | ||||
| @@ -107,22 +128,57 @@ func (hp handlerPanicRST) writeFrame(ctx writeContext) error { | ||||
| 	return ctx.Framer().WriteRSTStream(hp.StreamID, ErrCodeInternal) | ||||
| } | ||||
|  | ||||
| func (hp handlerPanicRST) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max } | ||||
|  | ||||
| func (se StreamError) writeFrame(ctx writeContext) error { | ||||
| 	return ctx.Framer().WriteRSTStream(se.StreamID, se.Code) | ||||
| } | ||||
|  | ||||
| func (se StreamError) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max } | ||||
|  | ||||
| type writePingAck struct{ pf *PingFrame } | ||||
|  | ||||
| func (w writePingAck) writeFrame(ctx writeContext) error { | ||||
| 	return ctx.Framer().WritePing(true, w.pf.Data) | ||||
| } | ||||
|  | ||||
| func (w writePingAck) staysWithinBuffer(max int) bool { return frameHeaderLen+len(w.pf.Data) <= max } | ||||
|  | ||||
| type writeSettingsAck struct{} | ||||
|  | ||||
| func (writeSettingsAck) writeFrame(ctx writeContext) error { | ||||
| 	return ctx.Framer().WriteSettingsAck() | ||||
| } | ||||
|  | ||||
| func (writeSettingsAck) staysWithinBuffer(max int) bool { return frameHeaderLen <= max } | ||||
|  | ||||
| // splitHeaderBlock splits headerBlock into fragments so that each fragment fits | ||||
| // in a single frame, then calls fn for each fragment. firstFrag/lastFrag are true | ||||
| // for the first/last fragment, respectively. | ||||
| func splitHeaderBlock(ctx writeContext, headerBlock []byte, fn func(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error) error { | ||||
| 	// For now we're lazy and just pick the minimum MAX_FRAME_SIZE | ||||
| 	// that all peers must support (16KB). Later we could care | ||||
| 	// more and send larger frames if the peer advertised it, but | ||||
| 	// there's little point. Most headers are small anyway (so we | ||||
| 	// generally won't have CONTINUATION frames), and extra frames | ||||
| 	// only waste 9 bytes anyway. | ||||
| 	const maxFrameSize = 16384 | ||||
|  | ||||
| 	first := true | ||||
| 	for len(headerBlock) > 0 { | ||||
| 		frag := headerBlock | ||||
| 		if len(frag) > maxFrameSize { | ||||
| 			frag = frag[:maxFrameSize] | ||||
| 		} | ||||
| 		headerBlock = headerBlock[len(frag):] | ||||
| 		if err := fn(ctx, frag, first, len(headerBlock) == 0); err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 		first = false | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| // writeResHeaders is a request to write a HEADERS and 0+ CONTINUATION frames | ||||
| // for HTTP response headers or trailers from a server handler. | ||||
| type writeResHeaders struct { | ||||
| @@ -144,6 +200,17 @@ func encKV(enc *hpack.Encoder, k, v string) { | ||||
| 	enc.WriteField(hpack.HeaderField{Name: k, Value: v}) | ||||
| } | ||||
|  | ||||
| func (w *writeResHeaders) staysWithinBuffer(max int) bool { | ||||
| 	// TODO: this is a common one. It'd be nice to return true | ||||
| 	// here and get into the fast path if we could be clever and | ||||
| 	// calculate the size fast enough, or at least a conservative | ||||
| 	// uppper bound that usually fires. (Maybe if w.h and | ||||
| 	// w.trailers are nil, so we don't need to enumerate it.) | ||||
| 	// Otherwise I'm afraid that just calculating the length to | ||||
| 	// answer this question would be slower than the ~2µs benefit. | ||||
| 	return false | ||||
| } | ||||
|  | ||||
| func (w *writeResHeaders) writeFrame(ctx writeContext) error { | ||||
| 	enc, buf := ctx.HeaderEncoder() | ||||
| 	buf.Reset() | ||||
| @@ -169,39 +236,69 @@ func (w *writeResHeaders) writeFrame(ctx writeContext) error { | ||||
| 		panic("unexpected empty hpack") | ||||
| 	} | ||||
|  | ||||
| 	// For now we're lazy and just pick the minimum MAX_FRAME_SIZE | ||||
| 	// that all peers must support (16KB). Later we could care | ||||
| 	// more and send larger frames if the peer advertised it, but | ||||
| 	// there's little point. Most headers are small anyway (so we | ||||
| 	// generally won't have CONTINUATION frames), and extra frames | ||||
| 	// only waste 9 bytes anyway. | ||||
| 	const maxFrameSize = 16384 | ||||
| 	return splitHeaderBlock(ctx, headerBlock, w.writeHeaderBlock) | ||||
| } | ||||
|  | ||||
| 	first := true | ||||
| 	for len(headerBlock) > 0 { | ||||
| 		frag := headerBlock | ||||
| 		if len(frag) > maxFrameSize { | ||||
| 			frag = frag[:maxFrameSize] | ||||
| 		} | ||||
| 		headerBlock = headerBlock[len(frag):] | ||||
| 		endHeaders := len(headerBlock) == 0 | ||||
| 		var err error | ||||
| 		if first { | ||||
| 			first = false | ||||
| 			err = ctx.Framer().WriteHeaders(HeadersFrameParam{ | ||||
| 				StreamID:      w.streamID, | ||||
| 				BlockFragment: frag, | ||||
| 				EndStream:     w.endStream, | ||||
| 				EndHeaders:    endHeaders, | ||||
| 			}) | ||||
| 		} else { | ||||
| 			err = ctx.Framer().WriteContinuation(w.streamID, endHeaders, frag) | ||||
| 		} | ||||
| 		if err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| func (w *writeResHeaders) writeHeaderBlock(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error { | ||||
| 	if firstFrag { | ||||
| 		return ctx.Framer().WriteHeaders(HeadersFrameParam{ | ||||
| 			StreamID:      w.streamID, | ||||
| 			BlockFragment: frag, | ||||
| 			EndStream:     w.endStream, | ||||
| 			EndHeaders:    lastFrag, | ||||
| 		}) | ||||
| 	} else { | ||||
| 		return ctx.Framer().WriteContinuation(w.streamID, lastFrag, frag) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // writePushPromise is a request to write a PUSH_PROMISE and 0+ CONTINUATION frames. | ||||
| type writePushPromise struct { | ||||
| 	streamID uint32   // pusher stream | ||||
| 	method   string   // for :method | ||||
| 	url      *url.URL // for :scheme, :authority, :path | ||||
| 	h        http.Header | ||||
|  | ||||
| 	// Creates an ID for a pushed stream. This runs on serveG just before | ||||
| 	// the frame is written. The returned ID is copied to promisedID. | ||||
| 	allocatePromisedID func() (uint32, error) | ||||
| 	promisedID         uint32 | ||||
| } | ||||
|  | ||||
| func (w *writePushPromise) staysWithinBuffer(max int) bool { | ||||
| 	// TODO: see writeResHeaders.staysWithinBuffer | ||||
| 	return false | ||||
| } | ||||
|  | ||||
| func (w *writePushPromise) writeFrame(ctx writeContext) error { | ||||
| 	enc, buf := ctx.HeaderEncoder() | ||||
| 	buf.Reset() | ||||
|  | ||||
| 	encKV(enc, ":method", w.method) | ||||
| 	encKV(enc, ":scheme", w.url.Scheme) | ||||
| 	encKV(enc, ":authority", w.url.Host) | ||||
| 	encKV(enc, ":path", w.url.RequestURI()) | ||||
| 	encodeHeaders(enc, w.h, nil) | ||||
|  | ||||
| 	headerBlock := buf.Bytes() | ||||
| 	if len(headerBlock) == 0 { | ||||
| 		panic("unexpected empty hpack") | ||||
| 	} | ||||
|  | ||||
| 	return splitHeaderBlock(ctx, headerBlock, w.writeHeaderBlock) | ||||
| } | ||||
|  | ||||
| func (w *writePushPromise) writeHeaderBlock(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error { | ||||
| 	if firstFrag { | ||||
| 		return ctx.Framer().WritePushPromise(PushPromiseParam{ | ||||
| 			StreamID:      w.streamID, | ||||
| 			PromiseID:     w.promisedID, | ||||
| 			BlockFragment: frag, | ||||
| 			EndHeaders:    lastFrag, | ||||
| 		}) | ||||
| 	} else { | ||||
| 		return ctx.Framer().WriteContinuation(w.streamID, lastFrag, frag) | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| type write100ContinueHeadersFrame struct { | ||||
| @@ -220,15 +317,24 @@ func (w write100ContinueHeadersFrame) writeFrame(ctx writeContext) error { | ||||
| 	}) | ||||
| } | ||||
|  | ||||
| func (w write100ContinueHeadersFrame) staysWithinBuffer(max int) bool { | ||||
| 	// Sloppy but conservative: | ||||
| 	return 9+2*(len(":status")+len("100")) <= max | ||||
| } | ||||
|  | ||||
| type writeWindowUpdate struct { | ||||
| 	streamID uint32 // or 0 for conn-level | ||||
| 	n        uint32 | ||||
| } | ||||
|  | ||||
| func (wu writeWindowUpdate) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max } | ||||
|  | ||||
| func (wu writeWindowUpdate) writeFrame(ctx writeContext) error { | ||||
| 	return ctx.Framer().WriteWindowUpdate(wu.streamID, wu.n) | ||||
| } | ||||
|  | ||||
| // encodeHeaders encodes an http.Header. If keys is not nil, then (k, h[k]) | ||||
| // is encoded only only if k is in keys. | ||||
| func encodeHeaders(enc *hpack.Encoder, h http.Header, keys []string) { | ||||
| 	if keys == nil { | ||||
| 		sorter := sorterPool.Get().(*sorter) | ||||
|   | ||||
							
								
								
									
										429
									
								
								vendor/golang.org/x/net/http2/writesched.go
									
									
									
										generated
									
									
										vendored
									
									
								
							
							
						
						
									
										429
									
								
								vendor/golang.org/x/net/http2/writesched.go
									
									
									
										generated
									
									
										vendored
									
									
								
							| @@ -6,14 +6,53 @@ package http2 | ||||
|  | ||||
| import "fmt" | ||||
|  | ||||
| // frameWriteMsg is a request to write a frame. | ||||
| type frameWriteMsg struct { | ||||
| // WriteScheduler is the interface implemented by HTTP/2 write schedulers. | ||||
| // Methods are never called concurrently. | ||||
| type WriteScheduler interface { | ||||
| 	// OpenStream opens a new stream in the write scheduler. | ||||
| 	// It is illegal to call this with streamID=0 or with a streamID that is | ||||
| 	// already open -- the call may panic. | ||||
| 	OpenStream(streamID uint32, options OpenStreamOptions) | ||||
|  | ||||
| 	// CloseStream closes a stream in the write scheduler. Any frames queued on | ||||
| 	// this stream should be discarded. It is illegal to call this on a stream | ||||
| 	// that is not open -- the call may panic. | ||||
| 	CloseStream(streamID uint32) | ||||
|  | ||||
| 	// AdjustStream adjusts the priority of the given stream. This may be called | ||||
| 	// on a stream that has not yet been opened or has been closed. Note that | ||||
| 	// RFC 7540 allows PRIORITY frames to be sent on streams in any state. See: | ||||
| 	// https://tools.ietf.org/html/rfc7540#section-5.1 | ||||
| 	AdjustStream(streamID uint32, priority PriorityParam) | ||||
|  | ||||
| 	// Push queues a frame in the scheduler. In most cases, this will not be | ||||
| 	// called with wr.StreamID()!=0 unless that stream is currently open. The one | ||||
| 	// exception is RST_STREAM frames, which may be sent on idle or closed streams. | ||||
| 	Push(wr FrameWriteRequest) | ||||
|  | ||||
| 	// Pop dequeues the next frame to write. Returns false if no frames can | ||||
| 	// be written. Frames with a given wr.StreamID() are Pop'd in the same | ||||
| 	// order they are Push'd. | ||||
| 	Pop() (wr FrameWriteRequest, ok bool) | ||||
| } | ||||
|  | ||||
| // OpenStreamOptions specifies extra options for WriteScheduler.OpenStream. | ||||
| type OpenStreamOptions struct { | ||||
| 	// PusherID is zero if the stream was initiated by the client. Otherwise, | ||||
| 	// PusherID names the stream that pushed the newly opened stream. | ||||
| 	PusherID uint32 | ||||
| } | ||||
|  | ||||
| // FrameWriteRequest is a request to write a frame. | ||||
| type FrameWriteRequest struct { | ||||
| 	// write is the interface value that does the writing, once the | ||||
| 	// writeScheduler (below) has decided to select this frame | ||||
| 	// to write. The write functions are all defined in write.go. | ||||
| 	// WriteScheduler has selected this frame to write. The write | ||||
| 	// functions are all defined in write.go. | ||||
| 	write writeFramer | ||||
|  | ||||
| 	stream *stream // used for prioritization. nil for non-stream frames. | ||||
| 	// stream is the stream on which this frame will be written. | ||||
| 	// nil for non-stream frames like PING and SETTINGS. | ||||
| 	stream *stream | ||||
|  | ||||
| 	// done, if non-nil, must be a buffered channel with space for | ||||
| 	// 1 message and is sent the return value from write (or an | ||||
| @@ -21,263 +60,183 @@ type frameWriteMsg struct { | ||||
| 	done chan error | ||||
| } | ||||
|  | ||||
| // for debugging only: | ||||
| func (wm frameWriteMsg) String() string { | ||||
| 	var streamID uint32 | ||||
| 	if wm.stream != nil { | ||||
| 		streamID = wm.stream.id | ||||
| 	} | ||||
| 	var des string | ||||
| 	if s, ok := wm.write.(fmt.Stringer); ok { | ||||
| 		des = s.String() | ||||
| 	} else { | ||||
| 		des = fmt.Sprintf("%T", wm.write) | ||||
| 	} | ||||
| 	return fmt.Sprintf("[frameWriteMsg stream=%d, ch=%v, type: %v]", streamID, wm.done != nil, des) | ||||
| } | ||||
|  | ||||
| // writeScheduler tracks pending frames to write, priorities, and decides | ||||
| // the next one to use. It is not thread-safe. | ||||
| type writeScheduler struct { | ||||
| 	// zero are frames not associated with a specific stream. | ||||
| 	// They're sent before any stream-specific frames. | ||||
| 	zero writeQueue | ||||
|  | ||||
| 	// maxFrameSize is the maximum size of a DATA frame | ||||
| 	// we'll write. Must be non-zero and between 16K-16M. | ||||
| 	maxFrameSize uint32 | ||||
|  | ||||
| 	// sq contains the stream-specific queues, keyed by stream ID. | ||||
| 	// when a stream is idle, it's deleted from the map. | ||||
| 	sq map[uint32]*writeQueue | ||||
|  | ||||
| 	// canSend is a slice of memory that's reused between frame | ||||
| 	// scheduling decisions to hold the list of writeQueues (from sq) | ||||
| 	// which have enough flow control data to send. After canSend is | ||||
| 	// built, the best is selected. | ||||
| 	canSend []*writeQueue | ||||
|  | ||||
| 	// pool of empty queues for reuse. | ||||
| 	queuePool []*writeQueue | ||||
| } | ||||
|  | ||||
| func (ws *writeScheduler) putEmptyQueue(q *writeQueue) { | ||||
| 	if len(q.s) != 0 { | ||||
| 		panic("queue must be empty") | ||||
| 	} | ||||
| 	ws.queuePool = append(ws.queuePool, q) | ||||
| } | ||||
|  | ||||
| func (ws *writeScheduler) getEmptyQueue() *writeQueue { | ||||
| 	ln := len(ws.queuePool) | ||||
| 	if ln == 0 { | ||||
| 		return new(writeQueue) | ||||
| 	} | ||||
| 	q := ws.queuePool[ln-1] | ||||
| 	ws.queuePool = ws.queuePool[:ln-1] | ||||
| 	return q | ||||
| } | ||||
|  | ||||
| func (ws *writeScheduler) empty() bool { return ws.zero.empty() && len(ws.sq) == 0 } | ||||
|  | ||||
| func (ws *writeScheduler) add(wm frameWriteMsg) { | ||||
| 	st := wm.stream | ||||
| 	if st == nil { | ||||
| 		ws.zero.push(wm) | ||||
| 	} else { | ||||
| 		ws.streamQueue(st.id).push(wm) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (ws *writeScheduler) streamQueue(streamID uint32) *writeQueue { | ||||
| 	if q, ok := ws.sq[streamID]; ok { | ||||
| 		return q | ||||
| 	} | ||||
| 	if ws.sq == nil { | ||||
| 		ws.sq = make(map[uint32]*writeQueue) | ||||
| 	} | ||||
| 	q := ws.getEmptyQueue() | ||||
| 	ws.sq[streamID] = q | ||||
| 	return q | ||||
| } | ||||
|  | ||||
| // take returns the most important frame to write and removes it from the scheduler. | ||||
| // It is illegal to call this if the scheduler is empty or if there are no connection-level | ||||
| // flow control bytes available. | ||||
| func (ws *writeScheduler) take() (wm frameWriteMsg, ok bool) { | ||||
| 	if ws.maxFrameSize == 0 { | ||||
| 		panic("internal error: ws.maxFrameSize not initialized or invalid") | ||||
| 	} | ||||
|  | ||||
| 	// If there are any frames not associated with streams, prefer those first. | ||||
| 	// These are usually SETTINGS, etc. | ||||
| 	if !ws.zero.empty() { | ||||
| 		return ws.zero.shift(), true | ||||
| 	} | ||||
| 	if len(ws.sq) == 0 { | ||||
| 		return | ||||
| 	} | ||||
|  | ||||
| 	// Next, prioritize frames on streams that aren't DATA frames (no cost). | ||||
| 	for id, q := range ws.sq { | ||||
| 		if q.firstIsNoCost() { | ||||
| 			return ws.takeFrom(id, q) | ||||
| // StreamID returns the id of the stream this frame will be written to. | ||||
| // 0 is used for non-stream frames such as PING and SETTINGS. | ||||
| func (wr FrameWriteRequest) StreamID() uint32 { | ||||
| 	if wr.stream == nil { | ||||
| 		if se, ok := wr.write.(StreamError); ok { | ||||
| 			// (*serverConn).resetStream doesn't set | ||||
| 			// stream because it doesn't necessarily have | ||||
| 			// one. So special case this type of write | ||||
| 			// message. | ||||
| 			return se.StreamID | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	// Now, all that remains are DATA frames with non-zero bytes to | ||||
| 	// send. So pick the best one. | ||||
| 	if len(ws.canSend) != 0 { | ||||
| 		panic("should be empty") | ||||
| 	} | ||||
| 	for _, q := range ws.sq { | ||||
| 		if n := ws.streamWritableBytes(q); n > 0 { | ||||
| 			ws.canSend = append(ws.canSend, q) | ||||
| 		} | ||||
| 	} | ||||
| 	if len(ws.canSend) == 0 { | ||||
| 		return | ||||
| 	} | ||||
| 	defer ws.zeroCanSend() | ||||
|  | ||||
| 	// TODO: find the best queue | ||||
| 	q := ws.canSend[0] | ||||
|  | ||||
| 	return ws.takeFrom(q.streamID(), q) | ||||
| } | ||||
|  | ||||
| // zeroCanSend is defered from take. | ||||
| func (ws *writeScheduler) zeroCanSend() { | ||||
| 	for i := range ws.canSend { | ||||
| 		ws.canSend[i] = nil | ||||
| 	} | ||||
| 	ws.canSend = ws.canSend[:0] | ||||
| } | ||||
|  | ||||
| // streamWritableBytes returns the number of DATA bytes we could write | ||||
| // from the given queue's stream, if this stream/queue were | ||||
| // selected. It is an error to call this if q's head isn't a | ||||
| // *writeData. | ||||
| func (ws *writeScheduler) streamWritableBytes(q *writeQueue) int32 { | ||||
| 	wm := q.head() | ||||
| 	ret := wm.stream.flow.available() // max we can write | ||||
| 	if ret == 0 { | ||||
| 		return 0 | ||||
| 	} | ||||
| 	if int32(ws.maxFrameSize) < ret { | ||||
| 		ret = int32(ws.maxFrameSize) | ||||
| 	} | ||||
| 	if ret == 0 { | ||||
| 		panic("internal error: ws.maxFrameSize not initialized or invalid") | ||||
| 	} | ||||
| 	wd := wm.write.(*writeData) | ||||
| 	if len(wd.p) < int(ret) { | ||||
| 		ret = int32(len(wd.p)) | ||||
| 	} | ||||
| 	return ret | ||||
| 	return wr.stream.id | ||||
| } | ||||
|  | ||||
| func (ws *writeScheduler) takeFrom(id uint32, q *writeQueue) (wm frameWriteMsg, ok bool) { | ||||
| 	wm = q.head() | ||||
| 	// If the first item in this queue costs flow control tokens | ||||
| 	// and we don't have enough, write as much as we can. | ||||
| 	if wd, ok := wm.write.(*writeData); ok && len(wd.p) > 0 { | ||||
| 		allowed := wm.stream.flow.available() // max we can write | ||||
| 		if allowed == 0 { | ||||
| 			// No quota available. Caller can try the next stream. | ||||
| 			return frameWriteMsg{}, false | ||||
| 		} | ||||
| 		if int32(ws.maxFrameSize) < allowed { | ||||
| 			allowed = int32(ws.maxFrameSize) | ||||
| 		} | ||||
| 		// TODO: further restrict the allowed size, because even if | ||||
| 		// the peer says it's okay to write 16MB data frames, we might | ||||
| 		// want to write smaller ones to properly weight competing | ||||
| 		// streams' priorities. | ||||
|  | ||||
| 		if len(wd.p) > int(allowed) { | ||||
| 			wm.stream.flow.take(allowed) | ||||
| 			chunk := wd.p[:allowed] | ||||
| 			wd.p = wd.p[allowed:] | ||||
| 			// Make up a new write message of a valid size, rather | ||||
| 			// than shifting one off the queue. | ||||
| 			return frameWriteMsg{ | ||||
| 				stream: wm.stream, | ||||
| 				write: &writeData{ | ||||
| 					streamID: wd.streamID, | ||||
| 					p:        chunk, | ||||
| 					// even if the original had endStream set, there | ||||
| 					// are bytes remaining because len(wd.p) > allowed, | ||||
| 					// so we know endStream is false: | ||||
| 					endStream: false, | ||||
| 				}, | ||||
| 				// our caller is blocking on the final DATA frame, not | ||||
| 				// these intermediates, so no need to wait: | ||||
| 				done: nil, | ||||
| 			}, true | ||||
| 		} | ||||
| 		wm.stream.flow.take(int32(len(wd.p))) | ||||
| // DataSize returns the number of flow control bytes that must be consumed | ||||
| // to write this entire frame. This is 0 for non-DATA frames. | ||||
| func (wr FrameWriteRequest) DataSize() int { | ||||
| 	if wd, ok := wr.write.(*writeData); ok { | ||||
| 		return len(wd.p) | ||||
| 	} | ||||
|  | ||||
| 	q.shift() | ||||
| 	if q.empty() { | ||||
| 		ws.putEmptyQueue(q) | ||||
| 		delete(ws.sq, id) | ||||
| 	} | ||||
| 	return wm, true | ||||
| 	return 0 | ||||
| } | ||||
|  | ||||
| func (ws *writeScheduler) forgetStream(id uint32) { | ||||
| 	q, ok := ws.sq[id] | ||||
| 	if !ok { | ||||
| // Consume consumes min(n, available) bytes from this frame, where available | ||||
| // is the number of flow control bytes available on the stream. Consume returns | ||||
| // 0, 1, or 2 frames, where the integer return value gives the number of frames | ||||
| // returned. | ||||
| // | ||||
| // If flow control prevents consuming any bytes, this returns (_, _, 0). If | ||||
| // the entire frame was consumed, this returns (wr, _, 1). Otherwise, this | ||||
| // returns (consumed, rest, 2), where 'consumed' contains the consumed bytes and | ||||
| // 'rest' contains the remaining bytes. The consumed bytes are deducted from the | ||||
| // underlying stream's flow control budget. | ||||
| func (wr FrameWriteRequest) Consume(n int32) (FrameWriteRequest, FrameWriteRequest, int) { | ||||
| 	var empty FrameWriteRequest | ||||
|  | ||||
| 	// Non-DATA frames are always consumed whole. | ||||
| 	wd, ok := wr.write.(*writeData) | ||||
| 	if !ok || len(wd.p) == 0 { | ||||
| 		return wr, empty, 1 | ||||
| 	} | ||||
|  | ||||
| 	// Might need to split after applying limits. | ||||
| 	allowed := wr.stream.flow.available() | ||||
| 	if n < allowed { | ||||
| 		allowed = n | ||||
| 	} | ||||
| 	if wr.stream.sc.maxFrameSize < allowed { | ||||
| 		allowed = wr.stream.sc.maxFrameSize | ||||
| 	} | ||||
| 	if allowed <= 0 { | ||||
| 		return empty, empty, 0 | ||||
| 	} | ||||
| 	if len(wd.p) > int(allowed) { | ||||
| 		wr.stream.flow.take(allowed) | ||||
| 		consumed := FrameWriteRequest{ | ||||
| 			stream: wr.stream, | ||||
| 			write: &writeData{ | ||||
| 				streamID: wd.streamID, | ||||
| 				p:        wd.p[:allowed], | ||||
| 				// Even if the original had endStream set, there | ||||
| 				// are bytes remaining because len(wd.p) > allowed, | ||||
| 				// so we know endStream is false. | ||||
| 				endStream: false, | ||||
| 			}, | ||||
| 			// Our caller is blocking on the final DATA frame, not | ||||
| 			// this intermediate frame, so no need to wait. | ||||
| 			done: nil, | ||||
| 		} | ||||
| 		rest := FrameWriteRequest{ | ||||
| 			stream: wr.stream, | ||||
| 			write: &writeData{ | ||||
| 				streamID:  wd.streamID, | ||||
| 				p:         wd.p[allowed:], | ||||
| 				endStream: wd.endStream, | ||||
| 			}, | ||||
| 			done: wr.done, | ||||
| 		} | ||||
| 		return consumed, rest, 2 | ||||
| 	} | ||||
|  | ||||
| 	// The frame is consumed whole. | ||||
| 	// NB: This cast cannot overflow because allowed is <= math.MaxInt32. | ||||
| 	wr.stream.flow.take(int32(len(wd.p))) | ||||
| 	return wr, empty, 1 | ||||
| } | ||||
|  | ||||
| // String is for debugging only. | ||||
| func (wr FrameWriteRequest) String() string { | ||||
| 	var des string | ||||
| 	if s, ok := wr.write.(fmt.Stringer); ok { | ||||
| 		des = s.String() | ||||
| 	} else { | ||||
| 		des = fmt.Sprintf("%T", wr.write) | ||||
| 	} | ||||
| 	return fmt.Sprintf("[FrameWriteRequest stream=%d, ch=%v, writer=%v]", wr.StreamID(), wr.done != nil, des) | ||||
| } | ||||
|  | ||||
| // replyToWriter sends err to wr.done and panics if the send must block | ||||
| // This does nothing if wr.done is nil. | ||||
| func (wr *FrameWriteRequest) replyToWriter(err error) { | ||||
| 	if wr.done == nil { | ||||
| 		return | ||||
| 	} | ||||
| 	delete(ws.sq, id) | ||||
|  | ||||
| 	// But keep it for others later. | ||||
| 	for i := range q.s { | ||||
| 		q.s[i] = frameWriteMsg{} | ||||
| 	select { | ||||
| 	case wr.done <- err: | ||||
| 	default: | ||||
| 		panic(fmt.Sprintf("unbuffered done channel passed in for type %T", wr.write)) | ||||
| 	} | ||||
| 	q.s = q.s[:0] | ||||
| 	ws.putEmptyQueue(q) | ||||
| 	wr.write = nil // prevent use (assume it's tainted after wr.done send) | ||||
| } | ||||
|  | ||||
| // writeQueue is used by implementations of WriteScheduler. | ||||
| type writeQueue struct { | ||||
| 	s []frameWriteMsg | ||||
| 	s []FrameWriteRequest | ||||
| } | ||||
|  | ||||
| // streamID returns the stream ID for a non-empty stream-specific queue. | ||||
| func (q *writeQueue) streamID() uint32 { return q.s[0].stream.id } | ||||
|  | ||||
| func (q *writeQueue) empty() bool { return len(q.s) == 0 } | ||||
|  | ||||
| func (q *writeQueue) push(wm frameWriteMsg) { | ||||
| 	q.s = append(q.s, wm) | ||||
| func (q *writeQueue) push(wr FrameWriteRequest) { | ||||
| 	q.s = append(q.s, wr) | ||||
| } | ||||
|  | ||||
| // head returns the next item that would be removed by shift. | ||||
| func (q *writeQueue) head() frameWriteMsg { | ||||
| func (q *writeQueue) shift() FrameWriteRequest { | ||||
| 	if len(q.s) == 0 { | ||||
| 		panic("invalid use of queue") | ||||
| 	} | ||||
| 	return q.s[0] | ||||
| } | ||||
|  | ||||
| func (q *writeQueue) shift() frameWriteMsg { | ||||
| 	if len(q.s) == 0 { | ||||
| 		panic("invalid use of queue") | ||||
| 	} | ||||
| 	wm := q.s[0] | ||||
| 	wr := q.s[0] | ||||
| 	// TODO: less copy-happy queue. | ||||
| 	copy(q.s, q.s[1:]) | ||||
| 	q.s[len(q.s)-1] = frameWriteMsg{} | ||||
| 	q.s[len(q.s)-1] = FrameWriteRequest{} | ||||
| 	q.s = q.s[:len(q.s)-1] | ||||
| 	return wm | ||||
| 	return wr | ||||
| } | ||||
|  | ||||
| func (q *writeQueue) firstIsNoCost() bool { | ||||
| 	if df, ok := q.s[0].write.(*writeData); ok { | ||||
| 		return len(df.p) == 0 | ||||
| // consume consumes up to n bytes from q.s[0]. If the frame is | ||||
| // entirely consumed, it is removed from the queue. If the frame | ||||
| // is partially consumed, the frame is kept with the consumed | ||||
| // bytes removed. Returns true iff any bytes were consumed. | ||||
| func (q *writeQueue) consume(n int32) (FrameWriteRequest, bool) { | ||||
| 	if len(q.s) == 0 { | ||||
| 		return FrameWriteRequest{}, false | ||||
| 	} | ||||
| 	return true | ||||
| 	consumed, rest, numresult := q.s[0].Consume(n) | ||||
| 	switch numresult { | ||||
| 	case 0: | ||||
| 		return FrameWriteRequest{}, false | ||||
| 	case 1: | ||||
| 		q.shift() | ||||
| 	case 2: | ||||
| 		q.s[0] = rest | ||||
| 	} | ||||
| 	return consumed, true | ||||
| } | ||||
|  | ||||
| type writeQueuePool []*writeQueue | ||||
|  | ||||
| // put inserts an unused writeQueue into the pool. | ||||
| func (p *writeQueuePool) put(q *writeQueue) { | ||||
| 	for i := range q.s { | ||||
| 		q.s[i] = FrameWriteRequest{} | ||||
| 	} | ||||
| 	q.s = q.s[:0] | ||||
| 	*p = append(*p, q) | ||||
| } | ||||
|  | ||||
| // get returns an empty writeQueue. | ||||
| func (p *writeQueuePool) get() *writeQueue { | ||||
| 	ln := len(*p) | ||||
| 	if ln == 0 { | ||||
| 		return new(writeQueue) | ||||
| 	} | ||||
| 	x := ln - 1 | ||||
| 	q := (*p)[x] | ||||
| 	(*p)[x] = nil | ||||
| 	*p = (*p)[:x] | ||||
| 	return q | ||||
| } | ||||
|   | ||||
							
								
								
									
										452
									
								
								vendor/golang.org/x/net/http2/writesched_priority.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										452
									
								
								vendor/golang.org/x/net/http2/writesched_priority.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,452 @@ | ||||
| // Copyright 2016 The Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| package http2 | ||||
|  | ||||
| import ( | ||||
| 	"fmt" | ||||
| 	"math" | ||||
| 	"sort" | ||||
| ) | ||||
|  | ||||
| // RFC 7540, Section 5.3.5: the default weight is 16. | ||||
| const priorityDefaultWeight = 15 // 16 = 15 + 1 | ||||
|  | ||||
| // PriorityWriteSchedulerConfig configures a priorityWriteScheduler. | ||||
| type PriorityWriteSchedulerConfig struct { | ||||
| 	// MaxClosedNodesInTree controls the maximum number of closed streams to | ||||
| 	// retain in the priority tree. Setting this to zero saves a small amount | ||||
| 	// of memory at the cost of performance. | ||||
| 	// | ||||
| 	// See RFC 7540, Section 5.3.4: | ||||
| 	//   "It is possible for a stream to become closed while prioritization | ||||
| 	//   information ... is in transit. ... This potentially creates suboptimal | ||||
| 	//   prioritization, since the stream could be given a priority that is | ||||
| 	//   different from what is intended. To avoid these problems, an endpoint | ||||
| 	//   SHOULD retain stream prioritization state for a period after streams | ||||
| 	//   become closed. The longer state is retained, the lower the chance that | ||||
| 	//   streams are assigned incorrect or default priority values." | ||||
| 	MaxClosedNodesInTree int | ||||
|  | ||||
| 	// MaxIdleNodesInTree controls the maximum number of idle streams to | ||||
| 	// retain in the priority tree. Setting this to zero saves a small amount | ||||
| 	// of memory at the cost of performance. | ||||
| 	// | ||||
| 	// See RFC 7540, Section 5.3.4: | ||||
| 	//   Similarly, streams that are in the "idle" state can be assigned | ||||
| 	//   priority or become a parent of other streams. This allows for the | ||||
| 	//   creation of a grouping node in the dependency tree, which enables | ||||
| 	//   more flexible expressions of priority. Idle streams begin with a | ||||
| 	//   default priority (Section 5.3.5). | ||||
| 	MaxIdleNodesInTree int | ||||
|  | ||||
| 	// ThrottleOutOfOrderWrites enables write throttling to help ensure that | ||||
| 	// data is delivered in priority order. This works around a race where | ||||
| 	// stream B depends on stream A and both streams are about to call Write | ||||
| 	// to queue DATA frames. If B wins the race, a naive scheduler would eagerly | ||||
| 	// write as much data from B as possible, but this is suboptimal because A | ||||
| 	// is a higher-priority stream. With throttling enabled, we write a small | ||||
| 	// amount of data from B to minimize the amount of bandwidth that B can | ||||
| 	// steal from A. | ||||
| 	ThrottleOutOfOrderWrites bool | ||||
| } | ||||
|  | ||||
| // NewPriorityWriteScheduler constructs a WriteScheduler that schedules | ||||
| // frames by following HTTP/2 priorities as described in RFC 7540 Section 5.3. | ||||
| // If cfg is nil, default options are used. | ||||
| func NewPriorityWriteScheduler(cfg *PriorityWriteSchedulerConfig) WriteScheduler { | ||||
| 	if cfg == nil { | ||||
| 		// For justification of these defaults, see: | ||||
| 		// https://docs.google.com/document/d/1oLhNg1skaWD4_DtaoCxdSRN5erEXrH-KnLrMwEpOtFY | ||||
| 		cfg = &PriorityWriteSchedulerConfig{ | ||||
| 			MaxClosedNodesInTree:     10, | ||||
| 			MaxIdleNodesInTree:       10, | ||||
| 			ThrottleOutOfOrderWrites: false, | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	ws := &priorityWriteScheduler{ | ||||
| 		nodes:                make(map[uint32]*priorityNode), | ||||
| 		maxClosedNodesInTree: cfg.MaxClosedNodesInTree, | ||||
| 		maxIdleNodesInTree:   cfg.MaxIdleNodesInTree, | ||||
| 		enableWriteThrottle:  cfg.ThrottleOutOfOrderWrites, | ||||
| 	} | ||||
| 	ws.nodes[0] = &ws.root | ||||
| 	if cfg.ThrottleOutOfOrderWrites { | ||||
| 		ws.writeThrottleLimit = 1024 | ||||
| 	} else { | ||||
| 		ws.writeThrottleLimit = math.MaxInt32 | ||||
| 	} | ||||
| 	return ws | ||||
| } | ||||
|  | ||||
| type priorityNodeState int | ||||
|  | ||||
| const ( | ||||
| 	priorityNodeOpen priorityNodeState = iota | ||||
| 	priorityNodeClosed | ||||
| 	priorityNodeIdle | ||||
| ) | ||||
|  | ||||
| // priorityNode is a node in an HTTP/2 priority tree. | ||||
| // Each node is associated with a single stream ID. | ||||
| // See RFC 7540, Section 5.3. | ||||
| type priorityNode struct { | ||||
| 	q            writeQueue        // queue of pending frames to write | ||||
| 	id           uint32            // id of the stream, or 0 for the root of the tree | ||||
| 	weight       uint8             // the actual weight is weight+1, so the value is in [1,256] | ||||
| 	state        priorityNodeState // open | closed | idle | ||||
| 	bytes        int64             // number of bytes written by this node, or 0 if closed | ||||
| 	subtreeBytes int64             // sum(node.bytes) of all nodes in this subtree | ||||
|  | ||||
| 	// These links form the priority tree. | ||||
| 	parent     *priorityNode | ||||
| 	kids       *priorityNode // start of the kids list | ||||
| 	prev, next *priorityNode // doubly-linked list of siblings | ||||
| } | ||||
|  | ||||
| func (n *priorityNode) setParent(parent *priorityNode) { | ||||
| 	if n == parent { | ||||
| 		panic("setParent to self") | ||||
| 	} | ||||
| 	if n.parent == parent { | ||||
| 		return | ||||
| 	} | ||||
| 	// Unlink from current parent. | ||||
| 	if parent := n.parent; parent != nil { | ||||
| 		if n.prev == nil { | ||||
| 			parent.kids = n.next | ||||
| 		} else { | ||||
| 			n.prev.next = n.next | ||||
| 		} | ||||
| 		if n.next != nil { | ||||
| 			n.next.prev = n.prev | ||||
| 		} | ||||
| 	} | ||||
| 	// Link to new parent. | ||||
| 	// If parent=nil, remove n from the tree. | ||||
| 	// Always insert at the head of parent.kids (this is assumed by walkReadyInOrder). | ||||
| 	n.parent = parent | ||||
| 	if parent == nil { | ||||
| 		n.next = nil | ||||
| 		n.prev = nil | ||||
| 	} else { | ||||
| 		n.next = parent.kids | ||||
| 		n.prev = nil | ||||
| 		if n.next != nil { | ||||
| 			n.next.prev = n | ||||
| 		} | ||||
| 		parent.kids = n | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (n *priorityNode) addBytes(b int64) { | ||||
| 	n.bytes += b | ||||
| 	for ; n != nil; n = n.parent { | ||||
| 		n.subtreeBytes += b | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // walkReadyInOrder iterates over the tree in priority order, calling f for each node | ||||
| // with a non-empty write queue. When f returns true, this function returns true and the | ||||
| // walk halts. tmp is used as scratch space for sorting. | ||||
| // | ||||
| // f(n, openParent) takes two arguments: the node to visit, n, and a bool that is true | ||||
| // if any ancestor p of n is still open (ignoring the root node). | ||||
| func (n *priorityNode) walkReadyInOrder(openParent bool, tmp *[]*priorityNode, f func(*priorityNode, bool) bool) bool { | ||||
| 	if !n.q.empty() && f(n, openParent) { | ||||
| 		return true | ||||
| 	} | ||||
| 	if n.kids == nil { | ||||
| 		return false | ||||
| 	} | ||||
|  | ||||
| 	// Don't consider the root "open" when updating openParent since | ||||
| 	// we can't send data frames on the root stream (only control frames). | ||||
| 	if n.id != 0 { | ||||
| 		openParent = openParent || (n.state == priorityNodeOpen) | ||||
| 	} | ||||
|  | ||||
| 	// Common case: only one kid or all kids have the same weight. | ||||
| 	// Some clients don't use weights; other clients (like web browsers) | ||||
| 	// use mostly-linear priority trees. | ||||
| 	w := n.kids.weight | ||||
| 	needSort := false | ||||
| 	for k := n.kids.next; k != nil; k = k.next { | ||||
| 		if k.weight != w { | ||||
| 			needSort = true | ||||
| 			break | ||||
| 		} | ||||
| 	} | ||||
| 	if !needSort { | ||||
| 		for k := n.kids; k != nil; k = k.next { | ||||
| 			if k.walkReadyInOrder(openParent, tmp, f) { | ||||
| 				return true | ||||
| 			} | ||||
| 		} | ||||
| 		return false | ||||
| 	} | ||||
|  | ||||
| 	// Uncommon case: sort the child nodes. We remove the kids from the parent, | ||||
| 	// then re-insert after sorting so we can reuse tmp for future sort calls. | ||||
| 	*tmp = (*tmp)[:0] | ||||
| 	for n.kids != nil { | ||||
| 		*tmp = append(*tmp, n.kids) | ||||
| 		n.kids.setParent(nil) | ||||
| 	} | ||||
| 	sort.Sort(sortPriorityNodeSiblings(*tmp)) | ||||
| 	for i := len(*tmp) - 1; i >= 0; i-- { | ||||
| 		(*tmp)[i].setParent(n) // setParent inserts at the head of n.kids | ||||
| 	} | ||||
| 	for k := n.kids; k != nil; k = k.next { | ||||
| 		if k.walkReadyInOrder(openParent, tmp, f) { | ||||
| 			return true | ||||
| 		} | ||||
| 	} | ||||
| 	return false | ||||
| } | ||||
|  | ||||
| type sortPriorityNodeSiblings []*priorityNode | ||||
|  | ||||
| func (z sortPriorityNodeSiblings) Len() int      { return len(z) } | ||||
| func (z sortPriorityNodeSiblings) Swap(i, k int) { z[i], z[k] = z[k], z[i] } | ||||
| func (z sortPriorityNodeSiblings) Less(i, k int) bool { | ||||
| 	// Prefer the subtree that has sent fewer bytes relative to its weight. | ||||
| 	// See sections 5.3.2 and 5.3.4. | ||||
| 	wi, bi := float64(z[i].weight+1), float64(z[i].subtreeBytes) | ||||
| 	wk, bk := float64(z[k].weight+1), float64(z[k].subtreeBytes) | ||||
| 	if bi == 0 && bk == 0 { | ||||
| 		return wi >= wk | ||||
| 	} | ||||
| 	if bk == 0 { | ||||
| 		return false | ||||
| 	} | ||||
| 	return bi/bk <= wi/wk | ||||
| } | ||||
|  | ||||
| type priorityWriteScheduler struct { | ||||
| 	// root is the root of the priority tree, where root.id = 0. | ||||
| 	// The root queues control frames that are not associated with any stream. | ||||
| 	root priorityNode | ||||
|  | ||||
| 	// nodes maps stream ids to priority tree nodes. | ||||
| 	nodes map[uint32]*priorityNode | ||||
|  | ||||
| 	// maxID is the maximum stream id in nodes. | ||||
| 	maxID uint32 | ||||
|  | ||||
| 	// lists of nodes that have been closed or are idle, but are kept in | ||||
| 	// the tree for improved prioritization. When the lengths exceed either | ||||
| 	// maxClosedNodesInTree or maxIdleNodesInTree, old nodes are discarded. | ||||
| 	closedNodes, idleNodes []*priorityNode | ||||
|  | ||||
| 	// From the config. | ||||
| 	maxClosedNodesInTree int | ||||
| 	maxIdleNodesInTree   int | ||||
| 	writeThrottleLimit   int32 | ||||
| 	enableWriteThrottle  bool | ||||
|  | ||||
| 	// tmp is scratch space for priorityNode.walkReadyInOrder to reduce allocations. | ||||
| 	tmp []*priorityNode | ||||
|  | ||||
| 	// pool of empty queues for reuse. | ||||
| 	queuePool writeQueuePool | ||||
| } | ||||
|  | ||||
| func (ws *priorityWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) { | ||||
| 	// The stream may be currently idle but cannot be opened or closed. | ||||
| 	if curr := ws.nodes[streamID]; curr != nil { | ||||
| 		if curr.state != priorityNodeIdle { | ||||
| 			panic(fmt.Sprintf("stream %d already opened", streamID)) | ||||
| 		} | ||||
| 		curr.state = priorityNodeOpen | ||||
| 		return | ||||
| 	} | ||||
|  | ||||
| 	// RFC 7540, Section 5.3.5: | ||||
| 	//  "All streams are initially assigned a non-exclusive dependency on stream 0x0. | ||||
| 	//  Pushed streams initially depend on their associated stream. In both cases, | ||||
| 	//  streams are assigned a default weight of 16." | ||||
| 	parent := ws.nodes[options.PusherID] | ||||
| 	if parent == nil { | ||||
| 		parent = &ws.root | ||||
| 	} | ||||
| 	n := &priorityNode{ | ||||
| 		q:      *ws.queuePool.get(), | ||||
| 		id:     streamID, | ||||
| 		weight: priorityDefaultWeight, | ||||
| 		state:  priorityNodeOpen, | ||||
| 	} | ||||
| 	n.setParent(parent) | ||||
| 	ws.nodes[streamID] = n | ||||
| 	if streamID > ws.maxID { | ||||
| 		ws.maxID = streamID | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (ws *priorityWriteScheduler) CloseStream(streamID uint32) { | ||||
| 	if streamID == 0 { | ||||
| 		panic("violation of WriteScheduler interface: cannot close stream 0") | ||||
| 	} | ||||
| 	if ws.nodes[streamID] == nil { | ||||
| 		panic(fmt.Sprintf("violation of WriteScheduler interface: unknown stream %d", streamID)) | ||||
| 	} | ||||
| 	if ws.nodes[streamID].state != priorityNodeOpen { | ||||
| 		panic(fmt.Sprintf("violation of WriteScheduler interface: stream %d already closed", streamID)) | ||||
| 	} | ||||
|  | ||||
| 	n := ws.nodes[streamID] | ||||
| 	n.state = priorityNodeClosed | ||||
| 	n.addBytes(-n.bytes) | ||||
|  | ||||
| 	q := n.q | ||||
| 	ws.queuePool.put(&q) | ||||
| 	n.q.s = nil | ||||
| 	if ws.maxClosedNodesInTree > 0 { | ||||
| 		ws.addClosedOrIdleNode(&ws.closedNodes, ws.maxClosedNodesInTree, n) | ||||
| 	} else { | ||||
| 		ws.removeNode(n) | ||||
| 	} | ||||
| } | ||||
|  | ||||
// AdjustStream moves streamID within the priority tree according to an
// HTTP/2 PRIORITY update. Streams not currently in the tree may be
// materialized as idle nodes so they can serve as dependency "grouping"
// points (RFC 7540 Section 5.3.4).
func (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) {
	if streamID == 0 {
		panic("adjustPriority on root")
	}

	// If streamID does not exist, there are two cases:
	// - A closed stream that has been removed (this will have ID <= maxID)
	// - An idle stream that is being used for "grouping" (this will have ID > maxID)
	n := ws.nodes[streamID]
	if n == nil {
		if streamID <= ws.maxID || ws.maxIdleNodesInTree == 0 {
			return
		}
		ws.maxID = streamID
		// Materialize an idle node under the root with the default weight.
		n = &priorityNode{
			q:      *ws.queuePool.get(),
			id:     streamID,
			weight: priorityDefaultWeight,
			state:  priorityNodeIdle,
		}
		n.setParent(&ws.root)
		ws.nodes[streamID] = n
		ws.addClosedOrIdleNode(&ws.idleNodes, ws.maxIdleNodesInTree, n)
	}

	// Section 5.3.1: A dependency on a stream that is not currently in the tree
	// results in that stream being given a default priority (Section 5.3.5).
	parent := ws.nodes[priority.StreamDep]
	if parent == nil {
		n.setParent(&ws.root)
		n.weight = priorityDefaultWeight
		return
	}

	// Ignore if the client tries to make a node its own parent.
	if n == parent {
		return
	}

	// Section 5.3.3:
	//   "If a stream is made dependent on one of its own dependencies, the
	//   formerly dependent stream is first moved to be dependent on the
	//   reprioritized stream's previous parent. The moved dependency retains
	//   its weight."
	//
	// That is: if parent depends on n, move parent to depend on n.parent.
	for x := parent.parent; x != nil; x = x.parent {
		if x == n {
			parent.setParent(n.parent)
			break
		}
	}

	// Section 5.3.3: The exclusive flag causes the stream to become the sole
	// dependency of its parent stream, causing other dependencies to become
	// dependent on the exclusive stream.
	if priority.Exclusive {
		k := parent.kids
		for k != nil {
			// Save the sibling link before setParent rewrites it.
			next := k.next
			if k != n {
				k.setParent(n)
			}
			k = next
		}
	}

	n.setParent(parent)
	n.weight = priority.Weight
}
|  | ||||
// Push queues wr on the priorityNode for its stream. Frames with stream
// ID 0 (connection-level control frames) go on the root's queue.
func (ws *priorityWriteScheduler) Push(wr FrameWriteRequest) {
	var n *priorityNode
	if id := wr.StreamID(); id == 0 {
		n = &ws.root
	} else {
		n = ws.nodes[id]
		if n == nil {
			// id is an idle or closed stream. wr should not be a HEADERS or
			// DATA frame. However, wr can be a RST_STREAM. In this case, we
			// push wr onto the root, rather than creating a new priorityNode,
			// since RST_STREAM is tiny and the stream's priority is unknown
			// anyway. See issue #17919.
			if wr.DataSize() > 0 {
				panic("add DATA on non-open stream")
			}
			n = &ws.root
		}
	}
	n.q.push(wr)
}
|  | ||||
// Pop selects the highest-priority ready frame by walking the tree in
// priority order. Children of streams with queued data ("open parents")
// are throttled so the parent is not starved entirely.
func (ws *priorityWriteScheduler) Pop() (wr FrameWriteRequest, ok bool) {
	ws.root.walkReadyInOrder(false, &ws.tmp, func(n *priorityNode, openParent bool) bool {
		limit := int32(math.MaxInt32)
		if openParent {
			limit = ws.writeThrottleLimit
		}
		wr, ok = n.q.consume(limit)
		if !ok {
			return false
		}
		n.addBytes(int64(wr.DataSize()))
		// If B depends on A and B continuously has data available but A
		// does not, gradually increase the throttling limit to allow B to
		// steal more and more bandwidth from A.
		if openParent {
			ws.writeThrottleLimit += 1024
			if ws.writeThrottleLimit < 0 {
				// Additive growth overflowed; clamp at the maximum.
				ws.writeThrottleLimit = math.MaxInt32
			}
		} else if ws.enableWriteThrottle {
			// Reset the throttle once a non-throttled node makes progress.
			ws.writeThrottleLimit = 1024
		}
		return true
	})
	return wr, ok
}
|  | ||||
| func (ws *priorityWriteScheduler) addClosedOrIdleNode(list *[]*priorityNode, maxSize int, n *priorityNode) { | ||||
| 	if maxSize == 0 { | ||||
| 		return | ||||
| 	} | ||||
| 	if len(*list) == maxSize { | ||||
| 		// Remove the oldest node, then shift left. | ||||
| 		ws.removeNode((*list)[0]) | ||||
| 		x := (*list)[1:] | ||||
| 		copy(*list, x) | ||||
| 		*list = (*list)[:len(x)] | ||||
| 	} | ||||
| 	*list = append(*list, n) | ||||
| } | ||||
|  | ||||
| func (ws *priorityWriteScheduler) removeNode(n *priorityNode) { | ||||
| 	for k := n.kids; k != nil; k = k.next { | ||||
| 		k.setParent(n.parent) | ||||
| 	} | ||||
| 	n.setParent(nil) | ||||
| 	delete(ws.nodes, n.id) | ||||
| } | ||||
							
								
								
									
										72
									
								
								vendor/golang.org/x/net/http2/writesched_random.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										72
									
								
								vendor/golang.org/x/net/http2/writesched_random.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,72 @@ | ||||
| // Copyright 2014 The Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| package http2 | ||||
|  | ||||
| import "math" | ||||
|  | ||||
| // NewRandomWriteScheduler constructs a WriteScheduler that ignores HTTP/2 | ||||
| // priorities. Control frames like SETTINGS and PING are written before DATA | ||||
| // frames, but if no control frames are queued and multiple streams have queued | ||||
| // HEADERS or DATA frames, Pop selects a ready stream arbitrarily. | ||||
| func NewRandomWriteScheduler() WriteScheduler { | ||||
| 	return &randomWriteScheduler{sq: make(map[uint32]*writeQueue)} | ||||
| } | ||||
|  | ||||
// randomWriteScheduler is a WriteScheduler that services streams in an
// arbitrary order, with connection-level control frames always first.
type randomWriteScheduler struct {
	// zero are frames not associated with a specific stream.
	zero writeQueue

	// sq contains the stream-specific queues, keyed by stream ID.
	// When a stream is idle or closed, it's deleted from the map.
	sq map[uint32]*writeQueue

	// pool of empty queues for reuse.
	queuePool writeQueuePool
}
|  | ||||
// OpenStream implements WriteScheduler. Queues are created lazily in
// Push, so there is nothing to do here.
func (ws *randomWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) {
	// no-op: idle streams are not tracked
}
|  | ||||
| func (ws *randomWriteScheduler) CloseStream(streamID uint32) { | ||||
| 	q, ok := ws.sq[streamID] | ||||
| 	if !ok { | ||||
| 		return | ||||
| 	} | ||||
| 	delete(ws.sq, streamID) | ||||
| 	ws.queuePool.put(q) | ||||
| } | ||||
|  | ||||
// AdjustStream implements WriteScheduler. This scheduler is priority-blind.
func (ws *randomWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) {
	// no-op: priorities are ignored
}
|  | ||||
| func (ws *randomWriteScheduler) Push(wr FrameWriteRequest) { | ||||
| 	id := wr.StreamID() | ||||
| 	if id == 0 { | ||||
| 		ws.zero.push(wr) | ||||
| 		return | ||||
| 	} | ||||
| 	q, ok := ws.sq[id] | ||||
| 	if !ok { | ||||
| 		q = ws.queuePool.get() | ||||
| 		ws.sq[id] = q | ||||
| 	} | ||||
| 	q.push(wr) | ||||
| } | ||||
|  | ||||
// Pop returns the next frame to write: control frames first, then any
// ready stream. The "random" order comes from Go's unspecified map
// iteration order over ws.sq.
func (ws *randomWriteScheduler) Pop() (FrameWriteRequest, bool) {
	// Control frames first.
	if !ws.zero.empty() {
		return ws.zero.shift(), true
	}
	// Iterate over all non-idle streams until finding one that can be consumed.
	for _, q := range ws.sq {
		if wr, ok := q.consume(math.MaxInt32); ok {
			return wr, true
		}
	}
	return FrameWriteRequest{}, false
}
							
								
								
									
										672
									
								
								vendor/golang.org/x/net/idna/idna.go
									
									
									
										generated
									
									
										vendored
									
									
								
							
							
						
						
									
										672
									
								
								vendor/golang.org/x/net/idna/idna.go
									
									
									
										generated
									
									
										vendored
									
									
								
							| @@ -1,61 +1,661 @@ | ||||
| // Copyright 2012 The Go Authors. All rights reserved. | ||||
| // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. | ||||
|  | ||||
| // Copyright 2016 The Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
// Package idna implements IDNA2008 using the compatibility processing
// defined by UTS (Unicode Technical Standard) #46, which defines a standard to
// deal with the transition from IDNA2003.
//
// IDNA2008 (Internationalized Domain Names for Applications) is defined in RFC
// 5890, RFC 5891, RFC 5892, RFC 5893 and RFC 5894.
// UTS #46 is defined in http://www.unicode.org/reports/tr46.
// See http://unicode.org/cldr/utility/idna.jsp for a visualization of the
// differences between these two standards.
package idna // import "golang.org/x/net/idna"
|  | ||||
| import ( | ||||
| 	"fmt" | ||||
| 	"strings" | ||||
| 	"unicode/utf8" | ||||
|  | ||||
| 	"golang.org/x/text/secure/bidirule" | ||||
| 	"golang.org/x/text/unicode/norm" | ||||
| ) | ||||
|  | ||||
| // TODO(nigeltao): specify when errors occur. For example, is ToASCII(".") or | ||||
| // ToASCII("foo\x00") an error? See also http://www.unicode.org/faq/idn.html#11 | ||||
| // NOTE: Unlike common practice in Go APIs, the functions will return a | ||||
| // sanitized domain name in case of errors. Browsers sometimes use a partially | ||||
| // evaluated string as lookup. | ||||
| // TODO: the current error handling is, in my opinion, the least opinionated. | ||||
| // Other strategies are also viable, though: | ||||
| // Option 1) Return an empty string in case of error, but allow the user to | ||||
| //    specify explicitly which errors to ignore. | ||||
| // Option 2) Return the partially evaluated string if it is itself a valid | ||||
| //    string, otherwise return the empty string in case of error. | ||||
| // Option 3) Option 1 and 2. | ||||
| // Option 4) Always return an empty string for now and implement Option 1 as | ||||
| //    needed, and document that the return string may not be empty in case of | ||||
| //    error in the future. | ||||
| // I think Option 1 is best, but it is quite opinionated. | ||||
|  | ||||
| // acePrefix is the ASCII Compatible Encoding prefix. | ||||
| const acePrefix = "xn--" | ||||
// ToASCII is a wrapper for Punycode.ToASCII: it converts a domain name to
// its punycoded (ASCII) form with only minimal validation.
func ToASCII(s string) (string, error) {
	return Punycode.process(s, true)
}
|  | ||||
// ToUnicode is a wrapper for Punycode.ToUnicode: it decodes punycoded
// labels back to Unicode with only minimal validation.
func ToUnicode(s string) (string, error) {
	return Punycode.process(s, false)
}
|  | ||||
// An Option configures a Profile at creation time. Options are applied
// in order by New; later options may override earlier ones.
type Option func(*options)
|  | ||||
| // Transitional sets a Profile to use the Transitional mapping as defined in UTS | ||||
| // #46. This will cause, for example, "ß" to be mapped to "ss". Using the | ||||
| // transitional mapping provides a compromise between IDNA2003 and IDNA2008 | ||||
| // compatibility. It is used by most browsers when resolving domain names. This | ||||
| // option is only meaningful if combined with MapForLookup. | ||||
| func Transitional(transitional bool) Option { | ||||
| 	return func(o *options) { o.transitional = true } | ||||
| } | ||||
|  | ||||
// VerifyDNSLength sets whether a Profile should fail if any of the IDN parts
// are longer than allowed by the RFC (63 bytes per label, 253 total).
func VerifyDNSLength(verify bool) Option {
	return func(o *options) { o.verifyDNSLength = verify }
}
|  | ||||
// ValidateLabels sets whether to check the mandatory label validation criteria
// as defined in Section 5.4 of RFC 5891. This includes testing for correct use
// of hyphens ('-'), normalization, validity of runes, and the context rules.
func ValidateLabels(enable bool) Option {
	return func(o *options) {
		// Don't override existing mappings, but set one that at least checks
		// normalization if it is not set.
		if o.mapping == nil && enable {
			o.mapping = normalize
		}
		// Validation needs the IDNA property trie and punycode checks.
		o.trie = trie
		o.validateLabels = enable
		o.fromPuny = validateFromPunycode
	}
}
|  | ||||
// StrictDomainName limits the set of permissible ASCII characters to those
// allowed in domain names as defined in RFC 1034 (A-Z, a-z, 0-9 and the
// hyphen). This is set by default for MapForLookup and ValidateForRegistration.
//
// This option is useful, for instance, for browsers that allow characters
// outside this range, for example a '_' (U+005F LOW LINE). See
// http://www.rfc-editor.org/std/std3.txt for more details. This option
// corresponds to the UseSTD3ASCIIRules option in UTS #46.
func StrictDomainName(use bool) Option {
	return func(o *options) {
		o.trie = trie
		o.useSTD3Rules = use
		o.fromPuny = validateFromPunycode
	}
}
|  | ||||
// NOTE: the following options pull in tables. The tables should not be linked
// in as long as the options are not used.

// BidiRule enables the Bidi rule as defined in RFC 5893. Any application
// that relies on proper validation of labels should include this rule.
func BidiRule() Option {
	return func(o *options) { o.bidirule = bidirule.ValidString }
}
|  | ||||
// ValidateForRegistration sets validation options to verify that a given IDN is
// properly formatted for registration as defined by Section 4 of RFC 5891.
// It is a bundle of StrictDomainName, ValidateLabels, VerifyDNSLength and
// BidiRule on top of the registration mapping.
func ValidateForRegistration() Option {
	return func(o *options) {
		o.mapping = validateRegistration
		StrictDomainName(true)(o)
		ValidateLabels(true)(o)
		VerifyDNSLength(true)(o)
		BidiRule()(o)
	}
}
|  | ||||
// MapForLookup sets validation and mapping options such that a given IDN is
// transformed for domain name lookup according to the requirements set out in
// Section 5 of RFC 5891. The mappings follow the recommendations of RFC 5894,
// RFC 5895 and UTS 46. It does not add the Bidi Rule. Use the BidiRule option
// to add this check.
//
// The mappings include normalization and mapping case, width and other
// compatibility mappings.
func MapForLookup() Option {
	return func(o *options) {
		o.mapping = validateAndMap
		StrictDomainName(true)(o)
		ValidateLabels(true)(o)
	}
}
|  | ||||
// options holds the configuration knobs assembled by Option functions.
type options struct {
	transitional    bool // use UTS #46 transitional mapping (deviation chars mapped)
	useSTD3Rules    bool // restrict ASCII to RFC 1034 letters/digits/hyphen
	validateLabels  bool // apply RFC 5891 Section 5.4 label checks
	verifyDNSLength bool // enforce 63-byte label / 253-byte name limits

	trie *idnaTrie

	// fromPuny calls validation rules when converting A-labels to U-labels.
	fromPuny func(p *Profile, s string) error

	// mapping implements a validation and mapping step as defined in RFC 5895
	// or UTS 46, tailored to, for example, domain registration or lookup.
	mapping func(p *Profile, s string) (string, error)

	// bidirule, if specified, checks whether s conforms to the Bidi Rule
	// defined in RFC 5893.
	bidirule func(s string) bool
}
|  | ||||
// A Profile defines the configuration of a IDNA mapper.
type Profile struct {
	options
}
|  | ||||
| func apply(o *options, opts []Option) { | ||||
| 	for _, f := range opts { | ||||
| 		f(o) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // New creates a new Profile. | ||||
| // | ||||
| // With no options, the returned Profile is the most permissive and equals the | ||||
| // Punycode Profile. Options can be passed to further restrict the Profile. The | ||||
| // MapForLookup and ValidateForRegistration options set a collection of options, | ||||
| // for lookup and registration purposes respectively, which can be tailored by | ||||
| // adding more fine-grained options, where later options override earlier | ||||
| // options. | ||||
| func New(o ...Option) *Profile { | ||||
| 	p := &Profile{} | ||||
| 	apply(&p.options, o) | ||||
| 	return p | ||||
| } | ||||
|  | ||||
| // ToASCII converts a domain or domain label to its ASCII form. For example, | ||||
| // ToASCII("bücher.example.com") is "xn--bcher-kva.example.com", and | ||||
| // ToASCII("golang") is "golang". | ||||
| func ToASCII(s string) (string, error) { | ||||
| 	if ascii(s) { | ||||
| 		return s, nil | ||||
| 	} | ||||
| 	labels := strings.Split(s, ".") | ||||
| 	for i, label := range labels { | ||||
| 		if !ascii(label) { | ||||
| 			a, err := encode(acePrefix, label) | ||||
| 			if err != nil { | ||||
| 				return "", err | ||||
| 			} | ||||
| 			labels[i] = a | ||||
| 		} | ||||
| 	} | ||||
| 	return strings.Join(labels, "."), nil | ||||
| // ToASCII("golang") is "golang". If an error is encountered it will return | ||||
| // an error and a (partially) processed result. | ||||
| func (p *Profile) ToASCII(s string) (string, error) { | ||||
| 	return p.process(s, true) | ||||
| } | ||||
|  | ||||
| // ToUnicode converts a domain or domain label to its Unicode form. For example, | ||||
| // ToUnicode("xn--bcher-kva.example.com") is "bücher.example.com", and | ||||
| // ToUnicode("golang") is "golang". | ||||
| func ToUnicode(s string) (string, error) { | ||||
| 	if !strings.Contains(s, acePrefix) { | ||||
| 		return s, nil | ||||
| // ToUnicode("golang") is "golang". If an error is encountered it will return | ||||
| // an error and a (partially) processed result. | ||||
| func (p *Profile) ToUnicode(s string) (string, error) { | ||||
| 	pp := *p | ||||
| 	pp.transitional = false | ||||
| 	return pp.process(s, false) | ||||
| } | ||||
|  | ||||
| // String reports a string with a description of the profile for debugging | ||||
| // purposes. The string format may change with different versions. | ||||
| func (p *Profile) String() string { | ||||
| 	s := "" | ||||
| 	if p.transitional { | ||||
| 		s = "Transitional" | ||||
| 	} else { | ||||
| 		s = "NonTransitional" | ||||
| 	} | ||||
| 	labels := strings.Split(s, ".") | ||||
| 	for i, label := range labels { | ||||
| 		if strings.HasPrefix(label, acePrefix) { | ||||
| 			u, err := decode(label[len(acePrefix):]) | ||||
| 			if err != nil { | ||||
| 				return "", err | ||||
| 	if p.useSTD3Rules { | ||||
| 		s += ":UseSTD3Rules" | ||||
| 	} | ||||
| 	if p.validateLabels { | ||||
| 		s += ":ValidateLabels" | ||||
| 	} | ||||
| 	if p.verifyDNSLength { | ||||
| 		s += ":VerifyDNSLength" | ||||
| 	} | ||||
| 	return s | ||||
| } | ||||
|  | ||||
var (
	// Punycode is a Profile that does raw punycode processing with a minimum
	// of validation.
	Punycode *Profile = punycode

	// Lookup is the recommended profile for looking up domain names, according
	// to Section 5 of RFC 5891. The exact configuration of this profile may
	// change over time.
	Lookup *Profile = lookup

	// Display is the recommended profile for displaying domain names.
	// The configuration of this profile may change over time.
	Display *Profile = display

	// Registration is the recommended profile for checking whether a given
	// IDN is valid for registration, according to Section 4 of RFC 5891.
	Registration *Profile = registration

	// punycode has the zero options: no mapping, no validation.
	punycode = &Profile{}
	// lookup is like display but additionally uses transitional mapping.
	lookup = &Profile{options{
		transitional:   true,
		useSTD3Rules:   true,
		validateLabels: true,
		trie:           trie,
		fromPuny:       validateFromPunycode,
		mapping:        validateAndMap,
		bidirule:       bidirule.ValidString,
	}}
	display = &Profile{options{
		useSTD3Rules:   true,
		validateLabels: true,
		trie:           trie,
		fromPuny:       validateFromPunycode,
		mapping:        validateAndMap,
		bidirule:       bidirule.ValidString,
	}}
	registration = &Profile{options{
		useSTD3Rules:    true,
		validateLabels:  true,
		verifyDNSLength: true,
		trie:            trie,
		fromPuny:        validateFromPunycode,
		mapping:         validateRegistration,
		bidirule:        bidirule.ValidString,
	}}

	// TODO: profiles
	// Register: recommended for approving domain names: don't do any mappings
	// but rather reject on invalid input. Bundle or block deviation characters.
)
|  | ||||
// labelError records an invalid domain label together with a UTS #46
// status code (such as "A4" or "V1") identifying which rule failed.
type labelError struct{ label, code_ string }

// code reports the UTS #46 status code for this error.
func (e labelError) code() string { return e.code_ }

// Error implements the error interface.
func (e labelError) Error() string {
	return fmt.Sprintf("idna: invalid label %q", e.label)
}
|  | ||||
// runeError reports a rune that is disallowed in a domain name.
type runeError rune

// code reports the UTS #46 status code for a disallowed rune.
func (e runeError) code() string { return "P1" }

// Error implements the error interface.
func (e runeError) Error() string {
	return fmt.Sprintf("idna: disallowed rune %U", rune(e))
}
|  | ||||
| // process implements the algorithm described in section 4 of UTS #46, | ||||
| // see http://www.unicode.org/reports/tr46. | ||||
| func (p *Profile) process(s string, toASCII bool) (string, error) { | ||||
| 	var err error | ||||
| 	if p.mapping != nil { | ||||
| 		s, err = p.mapping(p, s) | ||||
| 	} | ||||
| 	// Remove leading empty labels. | ||||
| 	for ; len(s) > 0 && s[0] == '.'; s = s[1:] { | ||||
| 	} | ||||
| 	// It seems like we should only create this error on ToASCII, but the | ||||
| 	// UTS 46 conformance tests suggests we should always check this. | ||||
| 	if err == nil && p.verifyDNSLength && s == "" { | ||||
| 		err = &labelError{s, "A4"} | ||||
| 	} | ||||
| 	labels := labelIter{orig: s} | ||||
| 	for ; !labels.done(); labels.next() { | ||||
| 		label := labels.label() | ||||
| 		if label == "" { | ||||
| 			// Empty labels are not okay. The label iterator skips the last | ||||
| 			// label if it is empty. | ||||
| 			if err == nil && p.verifyDNSLength { | ||||
| 				err = &labelError{s, "A4"} | ||||
| 			} | ||||
| 			labels[i] = u | ||||
| 			continue | ||||
| 		} | ||||
| 		if strings.HasPrefix(label, acePrefix) { | ||||
| 			u, err2 := decode(label[len(acePrefix):]) | ||||
| 			if err2 != nil { | ||||
| 				if err == nil { | ||||
| 					err = err2 | ||||
| 				} | ||||
| 				// Spec says keep the old label. | ||||
| 				continue | ||||
| 			} | ||||
| 			labels.set(u) | ||||
| 			if err == nil && p.validateLabels { | ||||
| 				err = p.fromPuny(p, u) | ||||
| 			} | ||||
| 			if err == nil { | ||||
| 				// This should be called on NonTransitional, according to the | ||||
| 				// spec, but that currently does not have any effect. Use the | ||||
| 				// original profile to preserve options. | ||||
| 				err = p.validateLabel(u) | ||||
| 			} | ||||
| 		} else if err == nil { | ||||
| 			err = p.validateLabel(label) | ||||
| 		} | ||||
| 	} | ||||
| 	return strings.Join(labels, "."), nil | ||||
| 	if toASCII { | ||||
| 		for labels.reset(); !labels.done(); labels.next() { | ||||
| 			label := labels.label() | ||||
| 			if !ascii(label) { | ||||
| 				a, err2 := encode(acePrefix, label) | ||||
| 				if err == nil { | ||||
| 					err = err2 | ||||
| 				} | ||||
| 				label = a | ||||
| 				labels.set(a) | ||||
| 			} | ||||
| 			n := len(label) | ||||
| 			if p.verifyDNSLength && err == nil && (n == 0 || n > 63) { | ||||
| 				err = &labelError{label, "A4"} | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 	s = labels.result() | ||||
| 	if toASCII && p.verifyDNSLength && err == nil { | ||||
| 		// Compute the length of the domain name minus the root label and its dot. | ||||
| 		n := len(s) | ||||
| 		if n > 0 && s[n-1] == '.' { | ||||
| 			n-- | ||||
| 		} | ||||
| 		if len(s) < 1 || n > 253 { | ||||
| 			err = &labelError{s, "A4"} | ||||
| 		} | ||||
| 	} | ||||
| 	return s, err | ||||
| } | ||||
|  | ||||
// normalize is the minimal mapping step: it converts s to Unicode
// Normalization Form C and performs no other validation.
func normalize(p *Profile, s string) (string, error) {
	return norm.NFC.String(s), nil
}
|  | ||||
| func validateRegistration(p *Profile, s string) (string, error) { | ||||
| 	if !norm.NFC.IsNormalString(s) { | ||||
| 		return s, &labelError{s, "V1"} | ||||
| 	} | ||||
| 	var err error | ||||
| 	for i := 0; i < len(s); { | ||||
| 		v, sz := trie.lookupString(s[i:]) | ||||
| 		i += sz | ||||
| 		// Copy bytes not copied so far. | ||||
| 		switch p.simplify(info(v).category()) { | ||||
| 		// TODO: handle the NV8 defined in the Unicode idna data set to allow | ||||
| 		// for strict conformance to IDNA2008. | ||||
| 		case valid, deviation: | ||||
| 		case disallowed, mapped, unknown, ignored: | ||||
| 			if err == nil { | ||||
| 				r, _ := utf8.DecodeRuneInString(s[i:]) | ||||
| 				err = runeError(r) | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 	return s, err | ||||
| } | ||||
|  | ||||
| func validateAndMap(p *Profile, s string) (string, error) { | ||||
| 	var ( | ||||
| 		err error | ||||
| 		b   []byte | ||||
| 		k   int | ||||
| 	) | ||||
| 	for i := 0; i < len(s); { | ||||
| 		v, sz := trie.lookupString(s[i:]) | ||||
| 		start := i | ||||
| 		i += sz | ||||
| 		// Copy bytes not copied so far. | ||||
| 		switch p.simplify(info(v).category()) { | ||||
| 		case valid: | ||||
| 			continue | ||||
| 		case disallowed: | ||||
| 			if err == nil { | ||||
| 				r, _ := utf8.DecodeRuneInString(s[i:]) | ||||
| 				err = runeError(r) | ||||
| 			} | ||||
| 			continue | ||||
| 		case mapped, deviation: | ||||
| 			b = append(b, s[k:start]...) | ||||
| 			b = info(v).appendMapping(b, s[start:i]) | ||||
| 		case ignored: | ||||
| 			b = append(b, s[k:start]...) | ||||
| 			// drop the rune | ||||
| 		case unknown: | ||||
| 			b = append(b, s[k:start]...) | ||||
| 			b = append(b, "\ufffd"...) | ||||
| 		} | ||||
| 		k = i | ||||
| 	} | ||||
| 	if k == 0 { | ||||
| 		// No changes so far. | ||||
| 		s = norm.NFC.String(s) | ||||
| 	} else { | ||||
| 		b = append(b, s[k:]...) | ||||
| 		if norm.NFC.QuickSpan(b) != len(b) { | ||||
| 			b = norm.NFC.Bytes(b) | ||||
| 		} | ||||
| 		// TODO: the punycode converters require strings as input. | ||||
| 		s = string(b) | ||||
| 	} | ||||
| 	return s, err | ||||
| } | ||||
|  | ||||
// A labelIter allows iterating over domain name labels.
// It starts out viewing orig directly; the first call to set splits orig
// into slice, after which iteration and result use the slice.
type labelIter struct {
	orig     string
	slice    []string
	curStart int // byte offset of the current label in orig
	curEnd   int // byte offset just past the current label in orig
	i        int // index of the current label when slice is in use
}
|  | ||||
// reset rewinds the iterator to the first label. Any labels already
// replaced via set are retained.
func (l *labelIter) reset() {
	l.curStart = 0
	l.curEnd = 0
	l.i = 0
}
|  | ||||
// done reports whether iteration has moved past the last label.
func (l *labelIter) done() bool {
	return l.curStart >= len(l.orig)
}
|  | ||||
| func (l *labelIter) result() string { | ||||
| 	if l.slice != nil { | ||||
| 		return strings.Join(l.slice, ".") | ||||
| 	} | ||||
| 	return l.orig | ||||
| } | ||||
|  | ||||
// label returns the current label. In string mode it also records the
// label's end offset (the next dot, or end of string) in curEnd.
func (l *labelIter) label() string {
	if l.slice != nil {
		return l.slice[l.i]
	}
	p := strings.IndexByte(l.orig[l.curStart:], '.')
	// Note: computed first, then overridden when no dot remains (p == -1).
	l.curEnd = l.curStart + p
	if p == -1 {
		l.curEnd = len(l.orig)
	}
	return l.orig[l.curStart:l.curEnd]
}
|  | ||||
// next sets the value to the next label. It skips the last label if it is empty
// (i.e. the name ends in a trailing dot denoting the root).
func (l *labelIter) next() {
	l.i++
	if l.slice != nil {
		if l.i >= len(l.slice) || l.i == len(l.slice)-1 && l.slice[l.i] == "" {
			l.curStart = len(l.orig)
		}
	} else {
		l.curStart = l.curEnd + 1
		if l.curStart == len(l.orig)-1 && l.orig[l.curStart] == '.' {
			l.curStart = len(l.orig)
		}
	}
}
|  | ||||
// set replaces the current label with s, lazily splitting orig into the
// slice representation on first use.
func (l *labelIter) set(s string) {
	if l.slice == nil {
		l.slice = strings.Split(l.orig, ".")
	}
	l.slice[l.i] = s
}
|  | ||||
// acePrefix is the ASCII Compatible Encoding prefix that marks a
// punycoded (A-label) domain label.
const acePrefix = "xn--"
|  | ||||
// simplify collapses the fine-grained trie categories into the basic set
// (valid/mapped/disallowed/...) according to the profile's STD3 and
// transitional settings.
func (p *Profile) simplify(cat category) category {
	switch cat {
	case disallowedSTD3Mapped:
		if p.useSTD3Rules {
			cat = disallowed
		} else {
			cat = mapped
		}
	case disallowedSTD3Valid:
		if p.useSTD3Rules {
			cat = disallowed
		} else {
			cat = valid
		}
	case deviation:
		// Deviation characters are only mapped in transitional processing.
		if !p.transitional {
			cat = valid
		}
	case validNV8, validXV8:
		// TODO: handle V2008
		cat = valid
	}
	return cat
}
|  | ||||
// validateFromPunycode checks a label decoded from punycode: it must be in
// NFC and every rune must be valid (or a deviation) for this profile.
func validateFromPunycode(p *Profile, s string) error {
	if !norm.NFC.IsNormalString(s) {
		return &labelError{s, "V1"}
	}
	for i := 0; i < len(s); {
		v, sz := trie.lookupString(s[i:])
		if c := p.simplify(info(v).category()); c != valid && c != deviation {
			return &labelError{s, "V6"}
		}
		i += sz
	}
	return nil
}
|  | ||||
const (
	zwnj = "\u200c" // ZERO WIDTH NON-JOINER
	zwj  = "\u200d" // ZERO WIDTH JOINER
)

// joinState is a state of the RFC 5892 Appendix A joiner-context
// automaton driven in validateLabel.
type joinState int8

const (
	stateStart joinState = iota
	stateVirama
	stateBefore
	stateBeforeVirama
	stateAfter
	stateFAIL
)

// joinStates is the transition table: joinStates[current][joinType] gives
// the next state. Missing entries are the zero value stateStart.
var joinStates = [][numJoinTypes]joinState{
	stateStart: {
		joiningL:   stateBefore,
		joiningD:   stateBefore,
		joinZWNJ:   stateFAIL,
		joinZWJ:    stateFAIL,
		joinVirama: stateVirama,
	},
	stateVirama: {
		joiningL: stateBefore,
		joiningD: stateBefore,
	},
	stateBefore: {
		joiningL:   stateBefore,
		joiningD:   stateBefore,
		joiningT:   stateBefore,
		joinZWNJ:   stateAfter,
		joinZWJ:    stateFAIL,
		joinVirama: stateBeforeVirama,
	},
	stateBeforeVirama: {
		joiningL: stateBefore,
		joiningD: stateBefore,
		joiningT: stateBefore,
	},
	stateAfter: {
		joiningL:   stateFAIL,
		joiningD:   stateBefore,
		joiningT:   stateAfter,
		joiningR:   stateStart,
		joinZWNJ:   stateFAIL,
		joinZWJ:    stateFAIL,
		joinVirama: stateAfter, // no-op as we can't accept joiners here
	},
	stateFAIL: {
		0:          stateFAIL,
		joiningL:   stateFAIL,
		joiningD:   stateFAIL,
		joiningT:   stateFAIL,
		joiningR:   stateFAIL,
		joinZWNJ:   stateFAIL,
		joinZWJ:    stateFAIL,
		joinVirama: stateFAIL,
	},
}
|  | ||||
// validateLabel validates the criteria from Section 4.1. Item 1, 4, and 6 are
// already implicitly satisfied by the overall implementation.
// Checks performed here: Bidi Rule (if configured), hyphen placement
// (V2/V3), leading combining marks (V5), and the joiner context rules of
// RFC 5892 Appendix A via the joinStates automaton.
func (p *Profile) validateLabel(s string) error {
	if s == "" {
		if p.verifyDNSLength {
			return &labelError{s, "A4"}
		}
		return nil
	}
	if p.bidirule != nil && !p.bidirule(s) {
		return &labelError{s, "B"}
	}
	if !p.validateLabels {
		return nil
	}
	trie := p.trie // p.validateLabels is only set if trie is set.
	// V2: no "--" in positions 3 and 4 (reserved for the ACE prefix).
	if len(s) > 4 && s[2] == '-' && s[3] == '-' {
		return &labelError{s, "V2"}
	}
	// V3: no leading or trailing hyphen.
	if s[0] == '-' || s[len(s)-1] == '-' {
		return &labelError{s, "V3"}
	}
	// TODO: merge the use of this in the trie.
	v, sz := trie.lookupString(s)
	x := info(v)
	if x.isModifier() {
		return &labelError{s, "V5"}
	}
	// Quickly return in the absence of zero-width (non) joiners.
	if strings.Index(s, zwj) == -1 && strings.Index(s, zwnj) == -1 {
		return nil
	}
	// Run the joiner-context automaton over every rune.
	st := stateStart
	for i := 0; ; {
		jt := x.joinType()
		if s[i:i+sz] == zwj {
			jt = joinZWJ
		} else if s[i:i+sz] == zwnj {
			jt = joinZWNJ
		}
		st = joinStates[st][jt]
		if x.isViramaModifier() {
			st = joinStates[st][joinVirama]
		}
		if i += sz; i == len(s) {
			break
		}
		v, sz = trie.lookupString(s[i:])
		x = info(v)
	}
	if st == stateFAIL || st == stateAfter {
		return &labelError{s, "C"}
	}
	return nil
}
|  | ||||
| func ascii(s string) bool { | ||||
|   | ||||
							
								
								
									
										23
									
								
								vendor/golang.org/x/net/idna/punycode.go
									
									
									
										generated
									
									
										vendored
									
									
								
							
							
						
						
									
										23
									
								
								vendor/golang.org/x/net/idna/punycode.go
									
									
									
										generated
									
									
										vendored
									
									
								
							| @@ -1,4 +1,6 @@ | ||||
| // Copyright 2012 The Go Authors. All rights reserved. | ||||
| // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. | ||||
|  | ||||
| // Copyright 2016 The Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| @@ -7,7 +9,6 @@ package idna | ||||
| // This file implements the Punycode algorithm from RFC 3492. | ||||
|  | ||||
| import ( | ||||
| 	"fmt" | ||||
| 	"math" | ||||
| 	"strings" | ||||
| 	"unicode/utf8" | ||||
| @@ -27,6 +28,8 @@ const ( | ||||
| 	tmin        int32 = 1 | ||||
| ) | ||||
|  | ||||
| func punyError(s string) error { return &labelError{s, "A3"} } | ||||
|  | ||||
| // decode decodes a string as specified in section 6.2. | ||||
| func decode(encoded string) (string, error) { | ||||
| 	if encoded == "" { | ||||
| @@ -34,7 +37,7 @@ func decode(encoded string) (string, error) { | ||||
| 	} | ||||
| 	pos := 1 + strings.LastIndex(encoded, "-") | ||||
| 	if pos == 1 { | ||||
| 		return "", fmt.Errorf("idna: invalid label %q", encoded) | ||||
| 		return "", punyError(encoded) | ||||
| 	} | ||||
| 	if pos == len(encoded) { | ||||
| 		return encoded[:len(encoded)-1], nil | ||||
| @@ -50,16 +53,16 @@ func decode(encoded string) (string, error) { | ||||
| 		oldI, w := i, int32(1) | ||||
| 		for k := base; ; k += base { | ||||
| 			if pos == len(encoded) { | ||||
| 				return "", fmt.Errorf("idna: invalid label %q", encoded) | ||||
| 				return "", punyError(encoded) | ||||
| 			} | ||||
| 			digit, ok := decodeDigit(encoded[pos]) | ||||
| 			if !ok { | ||||
| 				return "", fmt.Errorf("idna: invalid label %q", encoded) | ||||
| 				return "", punyError(encoded) | ||||
| 			} | ||||
| 			pos++ | ||||
| 			i += digit * w | ||||
| 			if i < 0 { | ||||
| 				return "", fmt.Errorf("idna: invalid label %q", encoded) | ||||
| 				return "", punyError(encoded) | ||||
| 			} | ||||
| 			t := k - bias | ||||
| 			if t < tmin { | ||||
| @@ -72,7 +75,7 @@ func decode(encoded string) (string, error) { | ||||
| 			} | ||||
| 			w *= base - t | ||||
| 			if w >= math.MaxInt32/base { | ||||
| 				return "", fmt.Errorf("idna: invalid label %q", encoded) | ||||
| 				return "", punyError(encoded) | ||||
| 			} | ||||
| 		} | ||||
| 		x := int32(len(output) + 1) | ||||
| @@ -80,7 +83,7 @@ func decode(encoded string) (string, error) { | ||||
| 		n += i / x | ||||
| 		i %= x | ||||
| 		if n > utf8.MaxRune || len(output) >= 1024 { | ||||
| 			return "", fmt.Errorf("idna: invalid label %q", encoded) | ||||
| 			return "", punyError(encoded) | ||||
| 		} | ||||
| 		output = append(output, 0) | ||||
| 		copy(output[i+1:], output[i:]) | ||||
| @@ -121,14 +124,14 @@ func encode(prefix, s string) (string, error) { | ||||
| 		} | ||||
| 		delta += (m - n) * (h + 1) | ||||
| 		if delta < 0 { | ||||
| 			return "", fmt.Errorf("idna: invalid label %q", s) | ||||
| 			return "", punyError(s) | ||||
| 		} | ||||
| 		n = m | ||||
| 		for _, r := range s { | ||||
| 			if r < n { | ||||
| 				delta++ | ||||
| 				if delta < 0 { | ||||
| 					return "", fmt.Errorf("idna: invalid label %q", s) | ||||
| 					return "", punyError(s) | ||||
| 				} | ||||
| 				continue | ||||
| 			} | ||||
|   | ||||
							
								
								
									
										4477
									
								
								vendor/golang.org/x/net/idna/tables.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										4477
									
								
								vendor/golang.org/x/net/idna/tables.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
										
											
												File diff suppressed because it is too large
												Load Diff
											
										
									
								
							
							
								
								
									
										72
									
								
								vendor/golang.org/x/net/idna/trie.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										72
									
								
								vendor/golang.org/x/net/idna/trie.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,72 @@ | ||||
| // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. | ||||
|  | ||||
| // Copyright 2016 The Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| package idna | ||||
|  | ||||
| // appendMapping appends the mapping for the respective rune. isMapped must be | ||||
| // true. A mapping is a categorization of a rune as defined in UTS #46. | ||||
| func (c info) appendMapping(b []byte, s string) []byte { | ||||
| 	index := int(c >> indexShift) | ||||
| 	if c&xorBit == 0 { | ||||
| 		s := mappings[index:] | ||||
| 		return append(b, s[1:s[0]+1]...) | ||||
| 	} | ||||
| 	b = append(b, s...) | ||||
| 	if c&inlineXOR == inlineXOR { | ||||
| 		// TODO: support and handle two-byte inline masks | ||||
| 		b[len(b)-1] ^= byte(index) | ||||
| 	} else { | ||||
| 		for p := len(b) - int(xorData[index]); p < len(b); p++ { | ||||
| 			index++ | ||||
| 			b[p] ^= xorData[index] | ||||
| 		} | ||||
| 	} | ||||
| 	return b | ||||
| } | ||||
|  | ||||
| // Sparse block handling code. | ||||
|  | ||||
| type valueRange struct { | ||||
| 	value  uint16 // header: value:stride | ||||
| 	lo, hi byte   // header: lo:n | ||||
| } | ||||
|  | ||||
| type sparseBlocks struct { | ||||
| 	values []valueRange | ||||
| 	offset []uint16 | ||||
| } | ||||
|  | ||||
| var idnaSparse = sparseBlocks{ | ||||
| 	values: idnaSparseValues[:], | ||||
| 	offset: idnaSparseOffset[:], | ||||
| } | ||||
|  | ||||
| // Don't use newIdnaTrie to avoid unconditional linking in of the table. | ||||
| var trie = &idnaTrie{} | ||||
|  | ||||
| // lookup determines the type of block n and looks up the value for b. | ||||
| // For n < t.cutoff, the block is a simple lookup table. Otherwise, the block | ||||
| // is a list of ranges with an accompanying value. Given a matching range r, | ||||
| // the value for b is by r.value + (b - r.lo) * stride. | ||||
| func (t *sparseBlocks) lookup(n uint32, b byte) uint16 { | ||||
| 	offset := t.offset[n] | ||||
| 	header := t.values[offset] | ||||
| 	lo := offset + 1 | ||||
| 	hi := lo + uint16(header.lo) | ||||
| 	for lo < hi { | ||||
| 		m := lo + (hi-lo)/2 | ||||
| 		r := t.values[m] | ||||
| 		if r.lo <= b && b <= r.hi { | ||||
| 			return r.value + uint16(b-r.lo)*header.value | ||||
| 		} | ||||
| 		if b < r.lo { | ||||
| 			hi = m | ||||
| 		} else { | ||||
| 			lo = m + 1 | ||||
| 		} | ||||
| 	} | ||||
| 	return 0 | ||||
| } | ||||
							
								
								
									
										114
									
								
								vendor/golang.org/x/net/idna/trieval.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										114
									
								
								vendor/golang.org/x/net/idna/trieval.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,114 @@ | ||||
| // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. | ||||
|  | ||||
| package idna | ||||
|  | ||||
| // This file contains definitions for interpreting the trie value of the idna | ||||
| // trie generated by "go run gen*.go". It is shared by both the generator | ||||
| // program and the resultant package. Sharing is achieved by the generator | ||||
| // copying gen_trieval.go to trieval.go and changing what's above this comment. | ||||
|  | ||||
| // info holds information from the IDNA mapping table for a single rune. It is | ||||
| // the value returned by a trie lookup. In most cases, all information fits in | ||||
| // a 16-bit value. For mappings, this value may contain an index into a slice | ||||
| // with the mapped string. Such mappings can consist of the actual mapped value | ||||
| // or an XOR pattern to be applied to the bytes of the UTF8 encoding of the | ||||
| // input rune. This technique is used by the cases packages and reduces the | ||||
| // table size significantly. | ||||
| // | ||||
| // The per-rune values have the following format: | ||||
| // | ||||
| //   if mapped { | ||||
| //     if inlinedXOR { | ||||
| //       15..13 inline XOR marker | ||||
| //       12..11 unused | ||||
| //       10..3  inline XOR mask | ||||
| //     } else { | ||||
| //       15..3  index into xor or mapping table | ||||
| //     } | ||||
| //   } else { | ||||
| //       15..13 unused | ||||
| //           12 modifier (including virama) | ||||
| //           11 virama modifier | ||||
| //       10..8  joining type | ||||
| //        7..3  category type | ||||
| //   } | ||||
| //      2  use xor pattern | ||||
| //   1..0  mapped category | ||||
| // | ||||
| // See the definitions below for a more detailed description of the various | ||||
| // bits. | ||||
| type info uint16 | ||||
|  | ||||
| const ( | ||||
| 	catSmallMask = 0x3 | ||||
| 	catBigMask   = 0xF8 | ||||
| 	indexShift   = 3 | ||||
| 	xorBit       = 0x4    // interpret the index as an xor pattern | ||||
| 	inlineXOR    = 0xE000 // These bits are set if the XOR pattern is inlined. | ||||
|  | ||||
| 	joinShift = 8 | ||||
| 	joinMask  = 0x07 | ||||
|  | ||||
| 	viramaModifier = 0x0800 | ||||
| 	modifier       = 0x1000 | ||||
| ) | ||||
|  | ||||
| // A category corresponds to a category defined in the IDNA mapping table. | ||||
| type category uint16 | ||||
|  | ||||
| const ( | ||||
| 	unknown              category = 0 // not defined currently in unicode. | ||||
| 	mapped               category = 1 | ||||
| 	disallowedSTD3Mapped category = 2 | ||||
| 	deviation            category = 3 | ||||
| ) | ||||
|  | ||||
| const ( | ||||
| 	valid               category = 0x08 | ||||
| 	validNV8            category = 0x18 | ||||
| 	validXV8            category = 0x28 | ||||
| 	disallowed          category = 0x40 | ||||
| 	disallowedSTD3Valid category = 0x80 | ||||
| 	ignored             category = 0xC0 | ||||
| ) | ||||
|  | ||||
| // join types and additional rune information | ||||
| const ( | ||||
| 	joiningL = (iota + 1) | ||||
| 	joiningD | ||||
| 	joiningT | ||||
| 	joiningR | ||||
|  | ||||
| 	//the following types are derived during processing | ||||
| 	joinZWJ | ||||
| 	joinZWNJ | ||||
| 	joinVirama | ||||
| 	numJoinTypes | ||||
| ) | ||||
|  | ||||
| func (c info) isMapped() bool { | ||||
| 	return c&0x3 != 0 | ||||
| } | ||||
|  | ||||
| func (c info) category() category { | ||||
| 	small := c & catSmallMask | ||||
| 	if small != 0 { | ||||
| 		return category(small) | ||||
| 	} | ||||
| 	return category(c & catBigMask) | ||||
| } | ||||
|  | ||||
| func (c info) joinType() info { | ||||
| 	if c.isMapped() { | ||||
| 		return 0 | ||||
| 	} | ||||
| 	return (c >> joinShift) & joinMask | ||||
| } | ||||
|  | ||||
| func (c info) isModifier() bool { | ||||
| 	return c&(modifier|catSmallMask) == modifier | ||||
| } | ||||
|  | ||||
| func (c info) isViramaModifier() bool { | ||||
| 	return c&(viramaModifier|catSmallMask) == viramaModifier | ||||
| } | ||||
							
								
								
									
										2
									
								
								vendor/golang.org/x/net/internal/timeseries/timeseries.go
									
									
									
										generated
									
									
										vendored
									
									
								
							
							
						
						
									
										2
									
								
								vendor/golang.org/x/net/internal/timeseries/timeseries.go
									
									
									
										generated
									
									
										vendored
									
									
								
							| @@ -371,7 +371,7 @@ func (ts *timeSeries) ComputeRange(start, finish time.Time, num int) []Observabl | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	// Failed to find a level that covers the desired range.  So just | ||||
| 	// Failed to find a level that covers the desired range. So just | ||||
| 	// extract from the last level, even if it doesn't cover the entire | ||||
| 	// desired range. | ||||
| 	ts.extract(ts.levels[len(ts.levels)-1], start, finish, num, results) | ||||
|   | ||||
							
								
								
									
										20
									
								
								vendor/golang.org/x/net/trace/events.go
									
									
									
										generated
									
									
										vendored
									
									
								
							
							
						
						
									
										20
									
								
								vendor/golang.org/x/net/trace/events.go
									
									
									
										generated
									
									
										vendored
									
									
								
							| @@ -21,11 +21,6 @@ import ( | ||||
| 	"time" | ||||
| ) | ||||
|  | ||||
| var eventsTmpl = template.Must(template.New("events").Funcs(template.FuncMap{ | ||||
| 	"elapsed":   elapsed, | ||||
| 	"trimSpace": strings.TrimSpace, | ||||
| }).Parse(eventsHTML)) | ||||
|  | ||||
| const maxEventsPerLog = 100 | ||||
|  | ||||
| type bucket struct { | ||||
| @@ -101,7 +96,7 @@ func RenderEvents(w http.ResponseWriter, req *http.Request, sensitive bool) { | ||||
|  | ||||
| 	famMu.RLock() | ||||
| 	defer famMu.RUnlock() | ||||
| 	if err := eventsTmpl.Execute(w, data); err != nil { | ||||
| 	if err := eventsTmpl().Execute(w, data); err != nil { | ||||
| 		log.Printf("net/trace: Failed executing template: %v", err) | ||||
| 	} | ||||
| } | ||||
| @@ -421,6 +416,19 @@ func freeEventLog(el *eventLog) { | ||||
| 	} | ||||
| } | ||||
|  | ||||
| var eventsTmplCache *template.Template | ||||
| var eventsTmplOnce sync.Once | ||||
|  | ||||
| func eventsTmpl() *template.Template { | ||||
| 	eventsTmplOnce.Do(func() { | ||||
| 		eventsTmplCache = template.Must(template.New("events").Funcs(template.FuncMap{ | ||||
| 			"elapsed":   elapsed, | ||||
| 			"trimSpace": strings.TrimSpace, | ||||
| 		}).Parse(eventsHTML)) | ||||
| 	}) | ||||
| 	return eventsTmplCache | ||||
| } | ||||
|  | ||||
| const eventsHTML = ` | ||||
| <html> | ||||
| 	<head> | ||||
|   | ||||
							
								
								
									
										15
									
								
								vendor/golang.org/x/net/trace/histogram.go
									
									
									
										generated
									
									
										vendored
									
									
								
							
							
						
						
									
										15
									
								
								vendor/golang.org/x/net/trace/histogram.go
									
									
									
										generated
									
									
										vendored
									
									
								
							| @@ -12,6 +12,7 @@ import ( | ||||
| 	"html/template" | ||||
| 	"log" | ||||
| 	"math" | ||||
| 	"sync" | ||||
|  | ||||
| 	"golang.org/x/net/internal/timeseries" | ||||
| ) | ||||
| @@ -320,15 +321,20 @@ func (h *histogram) newData() *data { | ||||
|  | ||||
| func (h *histogram) html() template.HTML { | ||||
| 	buf := new(bytes.Buffer) | ||||
| 	if err := distTmpl.Execute(buf, h.newData()); err != nil { | ||||
| 	if err := distTmpl().Execute(buf, h.newData()); err != nil { | ||||
| 		buf.Reset() | ||||
| 		log.Printf("net/trace: couldn't execute template: %v", err) | ||||
| 	} | ||||
| 	return template.HTML(buf.String()) | ||||
| } | ||||
|  | ||||
| // Input: data | ||||
| var distTmpl = template.Must(template.New("distTmpl").Parse(` | ||||
| var distTmplCache *template.Template | ||||
| var distTmplOnce sync.Once | ||||
|  | ||||
| func distTmpl() *template.Template { | ||||
| 	distTmplOnce.Do(func() { | ||||
| 		// Input: data | ||||
| 		distTmplCache = template.Must(template.New("distTmpl").Parse(` | ||||
| <table> | ||||
| <tr> | ||||
|     <td style="padding:0.25em">Count: {{.Count}}</td> | ||||
| @@ -354,3 +360,6 @@ var distTmpl = template.Must(template.New("distTmpl").Parse(` | ||||
| {{end}} | ||||
| </table> | ||||
| `)) | ||||
| 	}) | ||||
| 	return distTmplCache | ||||
| } | ||||
|   | ||||
							
								
								
									
										33
									
								
								vendor/golang.org/x/net/trace/trace.go
									
									
									
										generated
									
									
										vendored
									
									
								
							
							
						
						
									
										33
									
								
								vendor/golang.org/x/net/trace/trace.go
									
									
									
										generated
									
									
										vendored
									
									
								
							| @@ -77,7 +77,6 @@ import ( | ||||
| 	"sync/atomic" | ||||
| 	"time" | ||||
|  | ||||
| 	"golang.org/x/net/context" | ||||
| 	"golang.org/x/net/internal/timeseries" | ||||
| ) | ||||
|  | ||||
| @@ -238,7 +237,7 @@ func Render(w io.Writer, req *http.Request, sensitive bool) { | ||||
|  | ||||
| 	completedMu.RLock() | ||||
| 	defer completedMu.RUnlock() | ||||
| 	if err := pageTmpl.ExecuteTemplate(w, "Page", data); err != nil { | ||||
| 	if err := pageTmpl().ExecuteTemplate(w, "Page", data); err != nil { | ||||
| 		log.Printf("net/trace: Failed executing template: %v", err) | ||||
| 	} | ||||
| } | ||||
| @@ -271,18 +270,6 @@ type contextKeyT string | ||||
|  | ||||
| var contextKey = contextKeyT("golang.org/x/net/trace.Trace") | ||||
|  | ||||
| // NewContext returns a copy of the parent context | ||||
| // and associates it with a Trace. | ||||
| func NewContext(ctx context.Context, tr Trace) context.Context { | ||||
| 	return context.WithValue(ctx, contextKey, tr) | ||||
| } | ||||
|  | ||||
| // FromContext returns the Trace bound to the context, if any. | ||||
| func FromContext(ctx context.Context) (tr Trace, ok bool) { | ||||
| 	tr, ok = ctx.Value(contextKey).(Trace) | ||||
| 	return | ||||
| } | ||||
|  | ||||
| // Trace represents an active request. | ||||
| type Trace interface { | ||||
| 	// LazyLog adds x to the event log. It will be evaluated each time the | ||||
| @@ -752,7 +739,7 @@ func (tr *trace) addEvent(x interface{}, recyclable, sensitive bool) { | ||||
| 		and very unlikely to be the fault of this code. | ||||
|  | ||||
| 		The most likely scenario is that some code elsewhere is using | ||||
| 		a requestz.Trace after its Finish method is called. | ||||
| 		a trace.Trace after its Finish method is called. | ||||
| 		You can temporarily set the DebugUseAfterFinish var | ||||
| 		to help discover where that is; do not leave that var set, | ||||
| 		since it makes this package much less efficient. | ||||
| @@ -902,10 +889,18 @@ func elapsed(d time.Duration) string { | ||||
| 	return string(b) | ||||
| } | ||||
|  | ||||
| var pageTmpl = template.Must(template.New("Page").Funcs(template.FuncMap{ | ||||
| 	"elapsed": elapsed, | ||||
| 	"add":     func(a, b int) int { return a + b }, | ||||
| }).Parse(pageHTML)) | ||||
| var pageTmplCache *template.Template | ||||
| var pageTmplOnce sync.Once | ||||
|  | ||||
| func pageTmpl() *template.Template { | ||||
| 	pageTmplOnce.Do(func() { | ||||
| 		pageTmplCache = template.Must(template.New("Page").Funcs(template.FuncMap{ | ||||
| 			"elapsed": elapsed, | ||||
| 			"add":     func(a, b int) int { return a + b }, | ||||
| 		}).Parse(pageHTML)) | ||||
| 	}) | ||||
| 	return pageTmplCache | ||||
| } | ||||
|  | ||||
| const pageHTML = ` | ||||
| {{template "Prolog" .}} | ||||
|   | ||||
							
								
								
									
										21
									
								
								vendor/golang.org/x/net/trace/trace_go16.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										21
									
								
								vendor/golang.org/x/net/trace/trace_go16.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,21 @@ | ||||
| // Copyright 2017 The Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| // +build !go1.7 | ||||
|  | ||||
| package trace | ||||
|  | ||||
| import "golang.org/x/net/context" | ||||
|  | ||||
| // NewContext returns a copy of the parent context | ||||
| // and associates it with a Trace. | ||||
| func NewContext(ctx context.Context, tr Trace) context.Context { | ||||
| 	return context.WithValue(ctx, contextKey, tr) | ||||
| } | ||||
|  | ||||
| // FromContext returns the Trace bound to the context, if any. | ||||
| func FromContext(ctx context.Context) (tr Trace, ok bool) { | ||||
| 	tr, ok = ctx.Value(contextKey).(Trace) | ||||
| 	return | ||||
| } | ||||
							
								
								
									
										21
									
								
								vendor/golang.org/x/net/trace/trace_go17.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										21
									
								
								vendor/golang.org/x/net/trace/trace_go17.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,21 @@ | ||||
| // Copyright 2017 The Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| // +build go1.7 | ||||
|  | ||||
| package trace | ||||
|  | ||||
| import "context" | ||||
|  | ||||
| // NewContext returns a copy of the parent context | ||||
| // and associates it with a Trace. | ||||
| func NewContext(ctx context.Context, tr Trace) context.Context { | ||||
| 	return context.WithValue(ctx, contextKey, tr) | ||||
| } | ||||
|  | ||||
| // FromContext returns the Trace bound to the context, if any. | ||||
| func FromContext(ctx context.Context) (tr Trace, ok bool) { | ||||
| 	tr, ok = ctx.Value(contextKey).(Trace) | ||||
| 	return | ||||
| } | ||||
							
								
								
									
										27
									
								
								vendor/golang.org/x/text/LICENSE
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										27
									
								
								vendor/golang.org/x/text/LICENSE
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,27 @@ | ||||
| Copyright (c) 2009 The Go Authors. All rights reserved. | ||||
|  | ||||
| Redistribution and use in source and binary forms, with or without | ||||
| modification, are permitted provided that the following conditions are | ||||
| met: | ||||
|  | ||||
|    * Redistributions of source code must retain the above copyright | ||||
| notice, this list of conditions and the following disclaimer. | ||||
|    * Redistributions in binary form must reproduce the above | ||||
| copyright notice, this list of conditions and the following disclaimer | ||||
| in the documentation and/or other materials provided with the | ||||
| distribution. | ||||
|    * Neither the name of Google Inc. nor the names of its | ||||
| contributors may be used to endorse or promote products derived from | ||||
| this software without specific prior written permission. | ||||
|  | ||||
| THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||||
| "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||||
| LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||||
| A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||||
| OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||||
| SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||||
| LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||||
| DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||||
| THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||||
| (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||||
| OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||||
							
								
								
									
										22
									
								
								vendor/golang.org/x/text/PATENTS
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										22
									
								
								vendor/golang.org/x/text/PATENTS
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,22 @@ | ||||
| Additional IP Rights Grant (Patents) | ||||
|  | ||||
| "This implementation" means the copyrightable works distributed by | ||||
| Google as part of the Go project. | ||||
|  | ||||
| Google hereby grants to You a perpetual, worldwide, non-exclusive, | ||||
| no-charge, royalty-free, irrevocable (except as stated in this section) | ||||
| patent license to make, have made, use, offer to sell, sell, import, | ||||
| transfer and otherwise run, modify and propagate the contents of this | ||||
| implementation of Go, where such license applies only to those patent | ||||
| claims, both currently owned or controlled by Google and acquired in | ||||
| the future, licensable by Google that are necessarily infringed by this | ||||
| implementation of Go.  This grant does not include claims that would be | ||||
| infringed only as a consequence of further modification of this | ||||
| implementation.  If you or your agent or exclusive licensee institute or | ||||
| order or agree to the institution of patent litigation against any | ||||
| entity (including a cross-claim or counterclaim in a lawsuit) alleging | ||||
| that this implementation of Go or any code incorporated within this | ||||
| implementation of Go constitutes direct or contributory patent | ||||
| infringement, or inducement of patent infringement, then any patent | ||||
| rights granted to you under this License for this implementation of Go | ||||
| shall terminate as of the date such litigation is filed. | ||||
							
								
								
									
										23
									
								
								vendor/golang.org/x/text/README
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										23
									
								
								vendor/golang.org/x/text/README
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,23 @@ | ||||
| This repository holds supplementary Go libraries for text processing, many involving Unicode. | ||||
|  | ||||
| To submit changes to this repository, see http://golang.org/doc/contribute.html. | ||||
|  | ||||
| To generate the tables in this repository (except for the encoding tables), | ||||
| run go generate from this directory. By default tables are generated for the | ||||
| Unicode version in core and the CLDR version defined in | ||||
| golang.org/x/text/unicode/cldr. | ||||
|  | ||||
| Running go generate will as a side effect create a DATA subdirectory in this | ||||
| directory which holds all files that are used as a source for generating the | ||||
| tables. This directory will also serve as a cache. | ||||
|  | ||||
| Run | ||||
|  | ||||
| 	go test ./... | ||||
|  | ||||
| from this directory to run all tests. Add the "-tags icu" flag to also run | ||||
| ICU conformance tests (if available). This requires that you have the correct | ||||
| ICU version installed on your system. | ||||
|  | ||||
| TODO: | ||||
| - updating unversioned source files. | ||||
							
								
								
									
										351
									
								
								vendor/golang.org/x/text/internal/gen/code.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										351
									
								
								vendor/golang.org/x/text/internal/gen/code.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,351 @@ | ||||
| // Copyright 2015 The Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| package gen | ||||
|  | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"encoding/gob" | ||||
| 	"fmt" | ||||
| 	"hash" | ||||
| 	"hash/fnv" | ||||
| 	"io" | ||||
| 	"log" | ||||
| 	"os" | ||||
| 	"reflect" | ||||
| 	"strings" | ||||
| 	"unicode" | ||||
| 	"unicode/utf8" | ||||
| ) | ||||
|  | ||||
| // This file contains utilities for generating code. | ||||
|  | ||||
| // TODO: other write methods like: | ||||
| // - slices, maps, types, etc. | ||||
|  | ||||
| // CodeWriter is a utility for writing structured code. It computes the content | ||||
| // hash and size of written content. It ensures there are newlines between | ||||
| // written code blocks. | ||||
| type CodeWriter struct { | ||||
| 	buf  bytes.Buffer | ||||
| 	Size int | ||||
| 	Hash hash.Hash32 // content hash | ||||
| 	gob  *gob.Encoder | ||||
| 	// For comments we skip the usual one-line separator if they are followed by | ||||
| 	// a code block. | ||||
| 	skipSep bool | ||||
| } | ||||
|  | ||||
| func (w *CodeWriter) Write(p []byte) (n int, err error) { | ||||
| 	return w.buf.Write(p) | ||||
| } | ||||
|  | ||||
| // NewCodeWriter returns a new CodeWriter. | ||||
| func NewCodeWriter() *CodeWriter { | ||||
| 	h := fnv.New32() | ||||
| 	return &CodeWriter{Hash: h, gob: gob.NewEncoder(h)} | ||||
| } | ||||
|  | ||||
| // WriteGoFile appends the buffer with the total size of all created structures | ||||
| // and writes it as a Go file to the the given file with the given package name. | ||||
| func (w *CodeWriter) WriteGoFile(filename, pkg string) { | ||||
| 	f, err := os.Create(filename) | ||||
| 	if err != nil { | ||||
| 		log.Fatalf("Could not create file %s: %v", filename, err) | ||||
| 	} | ||||
| 	defer f.Close() | ||||
| 	if _, err = w.WriteGo(f, pkg); err != nil { | ||||
| 		log.Fatalf("Error writing file %s: %v", filename, err) | ||||
| 	} | ||||
| } | ||||
|  | ||||
// WriteGo appends the buffer with the total size of all created structures and
// writes it as a Go file to the given writer with the given package name.
// The internal buffer is reset afterwards so the CodeWriter can be reused.
func (w *CodeWriter) WriteGo(out io.Writer, pkg string) (n int, err error) {
	sz := w.Size
	w.WriteComment("Total table size %d bytes (%dKiB); checksum: %X\n", sz, sz/1024, w.Hash.Sum32())
	defer w.buf.Reset()
	return WriteGo(out, pkg, w.buf.Bytes())
}
|  | ||||
| func (w *CodeWriter) printf(f string, x ...interface{}) { | ||||
| 	fmt.Fprintf(w, f, x...) | ||||
| } | ||||
|  | ||||
| func (w *CodeWriter) insertSep() { | ||||
| 	if w.skipSep { | ||||
| 		w.skipSep = false | ||||
| 		return | ||||
| 	} | ||||
| 	// Use at least two newlines to ensure a blank space between the previous | ||||
| 	// block. WriteGoFile will remove extraneous newlines. | ||||
| 	w.printf("\n\n") | ||||
| } | ||||
|  | ||||
// WriteComment writes a comment block. All line starts are prefixed with "//".
// Initial empty lines are gobbled. The indentation for the first line is
// stripped from consecutive lines.
func (w *CodeWriter) WriteComment(comment string, args ...interface{}) {
	s := fmt.Sprintf(comment, args...)
	s = strings.Trim(s, "\n")

	// Use at least two newlines to ensure a blank space between the previous
	// block. WriteGoFile will remove extraneous newlines.
	w.printf("\n\n// ")
	w.skipSep = true

	// strip first indent level.
	sep := "\n"
	for ; len(s) > 0 && (s[0] == '\t' || s[0] == ' '); s = s[1:] {
		sep += s[:1]
	}

	// Replace the first-line indentation (sep) and plain newlines with
	// "\n// " so every subsequent line carries the comment prefix. The
	// WriteString error is deliberately ignored; w never fails.
	strings.NewReplacer(sep, "\n// ", "\n", "\n// ").WriteString(w, s)

	w.printf("\n")
}
|  | ||||
| func (w *CodeWriter) writeSizeInfo(size int) { | ||||
| 	w.printf("// Size: %d bytes\n", size) | ||||
| } | ||||
|  | ||||
| // WriteConst writes a constant of the given name and value. | ||||
| func (w *CodeWriter) WriteConst(name string, x interface{}) { | ||||
| 	w.insertSep() | ||||
| 	v := reflect.ValueOf(x) | ||||
|  | ||||
| 	switch v.Type().Kind() { | ||||
| 	case reflect.String: | ||||
| 		w.printf("const %s %s = ", name, typeName(x)) | ||||
| 		w.WriteString(v.String()) | ||||
| 		w.printf("\n") | ||||
| 	default: | ||||
| 		w.printf("const %s = %#v\n", name, x) | ||||
| 	} | ||||
| } | ||||
|  | ||||
// WriteVar writes a variable of the given name and value, adding the value's
// storage size to w.Size and (for non-string kinds) feeding it into the
// content hash.
func (w *CodeWriter) WriteVar(name string, x interface{}) {
	w.insertSep()
	v := reflect.ValueOf(x)
	oldSize := w.Size
	sz := int(v.Type().Size())
	w.Size += sz

	switch v.Type().Kind() {
	case reflect.String:
		w.printf("var %s %s = ", name, typeName(x))
		// WriteString hashes and counts the string contents itself.
		w.WriteString(v.String())
	case reflect.Struct:
		w.gob.Encode(x) // hash the struct, then render it like a slice/array
		fallthrough
	case reflect.Slice, reflect.Array:
		w.printf("var %s = ", name)
		w.writeValue(v)
		w.writeSizeInfo(w.Size - oldSize)
	default:
		w.printf("var %s %s = ", name, typeName(x))
		w.gob.Encode(x)
		w.writeValue(v)
		w.writeSizeInfo(w.Size - oldSize)
	}
	w.printf("\n")
}
|  | ||||
// writeValue renders a single reflected value as Go source, dispatching on
// its kind. Strings, slices, and arrays are delegated to the specialized
// writers; structs are rendered field by field; anything else uses %#v.
func (w *CodeWriter) writeValue(v reflect.Value) {
	x := v.Interface()
	switch v.Kind() {
	case reflect.String:
		w.WriteString(v.String())
	case reflect.Array:
		// Don't double count: callers of WriteArray count on the size being
		// added, so we need to discount it here.
		w.Size -= int(v.Type().Size())
		w.writeSlice(x, true)
	case reflect.Slice:
		w.writeSlice(x, false)
	case reflect.Struct:
		w.printf("%s{\n", typeName(v.Interface()))
		t := v.Type()
		for i := 0; i < v.NumField(); i++ {
			w.printf("%s: ", t.Field(i).Name)
			w.writeValue(v.Field(i))
			w.printf(",\n")
		}
		w.printf("}")
	default:
		// Scalars (ints, floats, bools, ...) in Go literal syntax.
		w.printf("%#v", x)
	}
}
|  | ||||
// WriteString writes a string literal. Short strings are written inline with
// %q; longer ones are wrapped into a gofmt-friendly multi-line concatenation.
// The (backslash-escaped) contents are fed into the content hash and counted
// in w.Size.
func (w *CodeWriter) WriteString(s string) {
	s = strings.Replace(s, `\`, `\\`, -1)
	io.WriteString(w.Hash, s) // content hash
	w.Size += len(s)

	const maxInline = 40
	if len(s) <= maxInline {
		w.printf("%q", s)
		return
	}

	// We will render the string as a multi-line string.
	const maxWidth = 80 - 4 - len(`"`) - len(`" +`)

	// When starting on its own line, go fmt indents line 2+ an extra level.
	n, max := maxWidth, maxWidth-4

	// As per https://golang.org/issue/18078, the compiler has trouble
	// compiling the concatenation of many strings, s0 + s1 + s2 + ... + sN,
	// for large N. We insert redundant, explicit parentheses to work around
	// that, lowering the N at any given step: (s0 + s1 + ... + s63) + (s64 +
	// ... + s127) + etc + (etc + ... + sN).
	explicitParens, extraComment := len(s) > 128*1024, ""
	if explicitParens {
		w.printf(`(`)
		extraComment = "; the redundant, explicit parens are for https://golang.org/issue/18078"
	}

	// Print "" +\n, if a string does not start on its own line.
	b := w.buf.Bytes()
	if p := len(bytes.TrimRight(b, " \t")); p > 0 && b[p-1] != '\n' {
		w.printf("\"\" + // Size: %d bytes%s\n", len(s), extraComment)
		n, max = maxWidth, maxWidth
	}

	w.printf(`"`)

	for sz, p, nLines := 0, 0, 0; p < len(s); {
		var r rune
		r, sz = utf8.DecodeRuneInString(s[p:])
		out := s[p : p+sz]
		chars := 1
		// Non-printable runes, decode errors, and quotes are escaped
		// explicitly; "chars" tracks the rendered width for line breaking.
		if !unicode.IsPrint(r) || r == utf8.RuneError || r == '"' {
			switch sz {
			case 1:
				out = fmt.Sprintf("\\x%02x", s[p])
			case 2, 3:
				out = fmt.Sprintf("\\u%04x", r)
			case 4:
				out = fmt.Sprintf("\\U%08x", r)
			}
			chars = len(out)
		}
		// Break the line when this rune would overflow it; every 64th break
		// also closes and reopens a parenthesized group (see above).
		if n -= chars; n < 0 {
			nLines++
			if explicitParens && nLines&63 == 63 {
				w.printf("\") + (\"")
			}
			w.printf("\" +\n\"")
			n = max - len(out)
		}
		w.printf("%s", out)
		p += sz
	}
	w.printf(`"`)
	if explicitParens {
		w.printf(`)`)
	}
}
|  | ||||
| // WriteSlice writes a slice value. | ||||
| func (w *CodeWriter) WriteSlice(x interface{}) { | ||||
| 	w.writeSlice(x, false) | ||||
| } | ||||
|  | ||||
| // WriteArray writes an array value. | ||||
| func (w *CodeWriter) WriteArray(x interface{}) { | ||||
| 	w.writeSlice(x, true) | ||||
| } | ||||
|  | ||||
| func (w *CodeWriter) writeSlice(x interface{}, isArray bool) { | ||||
| 	v := reflect.ValueOf(x) | ||||
| 	w.gob.Encode(v.Len()) | ||||
| 	w.Size += v.Len() * int(v.Type().Elem().Size()) | ||||
| 	name := typeName(x) | ||||
| 	if isArray { | ||||
| 		name = fmt.Sprintf("[%d]%s", v.Len(), name[strings.Index(name, "]")+1:]) | ||||
| 	} | ||||
| 	if isArray { | ||||
| 		w.printf("%s{\n", name) | ||||
| 	} else { | ||||
| 		w.printf("%s{ // %d elements\n", name, v.Len()) | ||||
| 	} | ||||
|  | ||||
| 	switch kind := v.Type().Elem().Kind(); kind { | ||||
| 	case reflect.String: | ||||
| 		for _, s := range x.([]string) { | ||||
| 			w.WriteString(s) | ||||
| 			w.printf(",\n") | ||||
| 		} | ||||
| 	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, | ||||
| 		reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: | ||||
| 		// nLine and nBlock are the number of elements per line and block. | ||||
| 		nLine, nBlock, format := 8, 64, "%d," | ||||
| 		switch kind { | ||||
| 		case reflect.Uint8: | ||||
| 			format = "%#02x," | ||||
| 		case reflect.Uint16: | ||||
| 			format = "%#04x," | ||||
| 		case reflect.Uint32: | ||||
| 			nLine, nBlock, format = 4, 32, "%#08x," | ||||
| 		case reflect.Uint, reflect.Uint64: | ||||
| 			nLine, nBlock, format = 4, 32, "%#016x," | ||||
| 		case reflect.Int8: | ||||
| 			nLine = 16 | ||||
| 		} | ||||
| 		n := nLine | ||||
| 		for i := 0; i < v.Len(); i++ { | ||||
| 			if i%nBlock == 0 && v.Len() > nBlock { | ||||
| 				w.printf("// Entry %X - %X\n", i, i+nBlock-1) | ||||
| 			} | ||||
| 			x := v.Index(i).Interface() | ||||
| 			w.gob.Encode(x) | ||||
| 			w.printf(format, x) | ||||
| 			if n--; n == 0 { | ||||
| 				n = nLine | ||||
| 				w.printf("\n") | ||||
| 			} | ||||
| 		} | ||||
| 		w.printf("\n") | ||||
| 	case reflect.Struct: | ||||
| 		zero := reflect.Zero(v.Type().Elem()).Interface() | ||||
| 		for i := 0; i < v.Len(); i++ { | ||||
| 			x := v.Index(i).Interface() | ||||
| 			w.gob.EncodeValue(v) | ||||
| 			if !reflect.DeepEqual(zero, x) { | ||||
| 				line := fmt.Sprintf("%#v,\n", x) | ||||
| 				line = line[strings.IndexByte(line, '{'):] | ||||
| 				w.printf("%d: ", i) | ||||
| 				w.printf(line) | ||||
| 			} | ||||
| 		} | ||||
| 	case reflect.Array: | ||||
| 		for i := 0; i < v.Len(); i++ { | ||||
| 			w.printf("%d: %#v,\n", i, v.Index(i).Interface()) | ||||
| 		} | ||||
| 	default: | ||||
| 		panic("gen: slice elem type not supported") | ||||
| 	} | ||||
| 	w.printf("}") | ||||
| } | ||||
|  | ||||
| // WriteType writes a definition of the type of the given value and returns the | ||||
| // type name. | ||||
| func (w *CodeWriter) WriteType(x interface{}) string { | ||||
| 	t := reflect.TypeOf(x) | ||||
| 	w.printf("type %s struct {\n", t.Name()) | ||||
| 	for i := 0; i < t.NumField(); i++ { | ||||
| 		w.printf("\t%s %s\n", t.Field(i).Name, t.Field(i).Type) | ||||
| 	} | ||||
| 	w.printf("}\n") | ||||
| 	return t.Name() | ||||
| } | ||||
|  | ||||
// typeName returns the name of the go type of x, with any "main." package
// qualifier stripped so generated code works in its target package.
func typeName(x interface{}) string {
	name := fmt.Sprint(reflect.ValueOf(x).Type())
	return strings.Replace(name, "main.", "", 1)
}
							
								
								
									
										281
									
								
								vendor/golang.org/x/text/internal/gen/gen.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										281
									
								
								vendor/golang.org/x/text/internal/gen/gen.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,281 @@ | ||||
| // Copyright 2015 The Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| // Package gen contains common code for the various code generation tools in the | ||||
| // text repository. Its usage ensures consistency between tools. | ||||
| // | ||||
| // This package defines command line flags that are common to most generation | ||||
| // tools. The flags allow for specifying specific Unicode and CLDR versions | ||||
| // in the public Unicode data repository (http://www.unicode.org/Public). | ||||
| // | ||||
| // A local Unicode data mirror can be set through the flag -local or the | ||||
| // environment variable UNICODE_DIR. The former takes precedence. The local | ||||
| // directory should follow the same structure as the public repository. | ||||
| // | ||||
| // IANA data can also optionally be mirrored by putting it in the iana directory | ||||
| // rooted at the top of the local mirror. Beware, though, that IANA data is not | ||||
| // versioned. So it is up to the developer to use the right version. | ||||
| package gen // import "golang.org/x/text/internal/gen" | ||||
|  | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"flag" | ||||
| 	"fmt" | ||||
| 	"go/build" | ||||
| 	"go/format" | ||||
| 	"io" | ||||
| 	"io/ioutil" | ||||
| 	"log" | ||||
| 	"net/http" | ||||
| 	"os" | ||||
| 	"path" | ||||
| 	"path/filepath" | ||||
| 	"sync" | ||||
| 	"unicode" | ||||
|  | ||||
| 	"golang.org/x/text/unicode/cldr" | ||||
| ) | ||||
|  | ||||
// Command-line flags shared by all generation tools. The Unicode and CLDR
// version defaults may also be supplied through the UNICODE_VERSION and
// CLDR_VERSION environment variables (see getEnv).
var (
	url = flag.String("url",
		"http://www.unicode.org/Public",
		"URL of Unicode database directory")
	iana = flag.String("iana",
		"http://www.iana.org",
		"URL of the IANA repository")
	unicodeVersion = flag.String("unicode",
		getEnv("UNICODE_VERSION", unicode.Version),
		"unicode version to use")
	cldrVersion = flag.String("cldr",
		getEnv("CLDR_VERSION", cldr.Version),
		"cldr version to use")
)
|  | ||||
// getEnv returns the value of the environment variable name, or def when the
// variable is unset or empty.
func getEnv(name, def string) string {
	if v, ok := os.LookupEnv(name); ok && v != "" {
		return v
	}
	return def
}
|  | ||||
// Init performs common initialization for a gen command. It configures the
// standard logger (file:line only, no prefix) and parses the command-line
// flags.
func Init() {
	log.SetFlags(log.Lshortfile)
	log.SetPrefix("")
	flag.Parse()
}
|  | ||||
// header is the preamble Sprintf'ed with the package name at the top of every
// generated file (see WriteGo).
const header = `// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.

package %s

`
|  | ||||
// UnicodeVersion reports the requested Unicode version, as set by the
// -unicode flag or the UNICODE_VERSION environment variable.
func UnicodeVersion() string {
	return *unicodeVersion
}
|  | ||||
// CLDRVersion reports the requested CLDR version, as set by the -cldr flag or
// the CLDR_VERSION environment variable. (The original comment misnamed this
// function as UnicodeVersion.)
func CLDRVersion() string {
	return *cldrVersion
}
|  | ||||
| // IsLocal reports whether data files are available locally. | ||||
| func IsLocal() bool { | ||||
| 	dir, err := localReadmeFile() | ||||
| 	if err != nil { | ||||
| 		return false | ||||
| 	} | ||||
| 	if _, err = os.Stat(dir); err != nil { | ||||
| 		return false | ||||
| 	} | ||||
| 	return true | ||||
| } | ||||
|  | ||||
| // OpenUCDFile opens the requested UCD file. The file is specified relative to | ||||
| // the public Unicode root directory. It will call log.Fatal if there are any | ||||
| // errors. | ||||
| func OpenUCDFile(file string) io.ReadCloser { | ||||
| 	return openUnicode(path.Join(*unicodeVersion, "ucd", file)) | ||||
| } | ||||
|  | ||||
| // OpenCLDRCoreZip opens the CLDR core zip file. It will call log.Fatal if there | ||||
| // are any errors. | ||||
| func OpenCLDRCoreZip() io.ReadCloser { | ||||
| 	return OpenUnicodeFile("cldr", *cldrVersion, "core.zip") | ||||
| } | ||||
|  | ||||
| // OpenUnicodeFile opens the requested file of the requested category from the | ||||
| // root of the Unicode data archive. The file is specified relative to the | ||||
| // public Unicode root directory. If version is "", it will use the default | ||||
| // Unicode version. It will call log.Fatal if there are any errors. | ||||
| func OpenUnicodeFile(category, version, file string) io.ReadCloser { | ||||
| 	if version == "" { | ||||
| 		version = UnicodeVersion() | ||||
| 	} | ||||
| 	return openUnicode(path.Join(category, version, file)) | ||||
| } | ||||
|  | ||||
| // OpenIANAFile opens the requested IANA file. The file is specified relative | ||||
| // to the IANA root, which is typically either http://www.iana.org or the | ||||
| // iana directory in the local mirror. It will call log.Fatal if there are any | ||||
| // errors. | ||||
| func OpenIANAFile(path string) io.ReadCloser { | ||||
| 	return Open(*iana, "iana", path) | ||||
| } | ||||
|  | ||||
var (
	dirMutex sync.Mutex // serializes creation/seeding of the local mirror dir
	localDir string     // NOTE(review): never assigned in this file; possibly vestigial
)

// permissions applied to the local mirror directory and downloaded files.
const permissions = 0755
|  | ||||
// localReadmeFile returns the path of the README inside the local data
// mirror, located under the golang.org/x/text package directory.
func localReadmeFile() (string, error) {
	pkg, err := build.Import("golang.org/x/text", "", build.FindOnly)
	if err != nil {
		return "", fmt.Errorf("Could not locate package: %v", err)
	}
	return filepath.Join(pkg.Dir, "DATA", "README"), nil
}
|  | ||||
// getLocalDir returns the directory of the local data mirror, creating it
// and seeding it with an explanatory README on first use. It calls log.Fatal
// on errors.
func getLocalDir() string {
	dirMutex.Lock()
	defer dirMutex.Unlock()

	readme, err := localReadmeFile()
	if err != nil {
		log.Fatal(err)
	}
	dir := filepath.Dir(readme)
	if _, err := os.Stat(readme); err != nil {
		if err := os.MkdirAll(dir, permissions); err != nil {
			log.Fatalf("Could not create directory: %v", err)
		}
		// Best effort: failing to write the README is deliberately not fatal.
		ioutil.WriteFile(readme, []byte(readmeTxt), permissions)
	}
	return dir
}
|  | ||||
// readmeTxt is written into the local data mirror by getLocalDir to explain
// the directory's origin.
// NOTE(review): "all other times" in the text below is likely a typo for
// "all other files"; the text is runtime output, so it is left unchanged.
const readmeTxt = `Generated by golang.org/x/text/internal/gen. DO NOT EDIT.

This directory contains downloaded files used to generate the various tables
in the golang.org/x/text subrepo.

Note that the language subtag repo (iana/assignments/language-subtag-registry)
and all other times in the iana subdirectory are not versioned and will need
to be periodically manually updated. The easiest way to do this is to remove
the entire iana directory. This is mostly of concern when updating the language
package.
`
|  | ||||
| // Open opens subdir/path if a local directory is specified and the file exists, | ||||
| // where subdir is a directory relative to the local root, or fetches it from | ||||
| // urlRoot/path otherwise. It will call log.Fatal if there are any errors. | ||||
| func Open(urlRoot, subdir, path string) io.ReadCloser { | ||||
| 	file := filepath.Join(getLocalDir(), subdir, filepath.FromSlash(path)) | ||||
| 	return open(file, urlRoot, path) | ||||
| } | ||||
|  | ||||
| func openUnicode(path string) io.ReadCloser { | ||||
| 	file := filepath.Join(getLocalDir(), filepath.FromSlash(path)) | ||||
| 	return open(file, *url, path) | ||||
| } | ||||
|  | ||||
| // TODO: automatically periodically update non-versioned files. | ||||
|  | ||||
// open returns file if it exists locally; otherwise it downloads urlRoot/path,
// caches the bytes at file, and returns a reader over them. It calls
// log.Fatal on any error.
func open(file, urlRoot, path string) io.ReadCloser {
	if f, err := os.Open(file); err == nil {
		return f
	}
	r := get(urlRoot, path)
	defer r.Close()
	b, err := ioutil.ReadAll(r)
	if err != nil {
		log.Fatalf("Could not download file: %v", err)
	}
	// A MkdirAll failure is ignored here; it would surface as a WriteFile
	// error immediately below.
	os.MkdirAll(filepath.Dir(file), permissions)
	if err := ioutil.WriteFile(file, b, permissions); err != nil {
		log.Fatalf("Could not create file: %v", err)
	}
	return ioutil.NopCloser(bytes.NewReader(b))
}
|  | ||||
// get fetches root/path over HTTP and returns the response body. It calls
// log.Fatal on network errors or a non-200 status; the caller must close the
// returned body.
// NOTE(review): http.Get uses the default client with no timeout, which is
// acceptable for an interactive generation tool.
func get(root, path string) io.ReadCloser {
	url := root + "/" + path
	fmt.Printf("Fetching %s...", url)
	defer fmt.Println(" done.")
	resp, err := http.Get(url)
	if err != nil {
		log.Fatalf("HTTP GET: %v", err)
	}
	if resp.StatusCode != 200 {
		log.Fatalf("Bad GET status for %q: %q", url, resp.Status)
	}
	return resp.Body
}
|  | ||||
| // TODO: use Write*Version in all applicable packages. | ||||
|  | ||||
| // WriteUnicodeVersion writes a constant for the Unicode version from which the | ||||
| // tables are generated. | ||||
| func WriteUnicodeVersion(w io.Writer) { | ||||
| 	fmt.Fprintf(w, "// UnicodeVersion is the Unicode version from which the tables in this package are derived.\n") | ||||
| 	fmt.Fprintf(w, "const UnicodeVersion = %q\n\n", UnicodeVersion()) | ||||
| } | ||||
|  | ||||
| // WriteCLDRVersion writes a constant for the CLDR version from which the | ||||
| // tables are generated. | ||||
| func WriteCLDRVersion(w io.Writer) { | ||||
| 	fmt.Fprintf(w, "// CLDRVersion is the CLDR version from which the tables in this package are derived.\n") | ||||
| 	fmt.Fprintf(w, "const CLDRVersion = %q\n\n", CLDRVersion()) | ||||
| } | ||||
|  | ||||
// WriteGoFile prepends a standard file comment and package statement to the
// given bytes, applies gofmt, and writes them to a file with the given name.
// It will call log.Fatal if there are any errors.
func WriteGoFile(filename, pkg string, b []byte) {
	w, err := os.Create(filename)
	if err != nil {
		log.Fatalf("Could not create file %s: %v", filename, err)
	}
	// The deferred Close error is ignored; write failures are already fatal
	// via WriteGo below.
	defer w.Close()
	if _, err = WriteGo(w, pkg, b); err != nil {
		log.Fatalf("Error writing file %s: %v", filename, err)
	}
}
|  | ||||
// WriteGo prepends a standard file comment and package statement to the given
// bytes, applies gofmt, and writes them to w. It returns the number of bytes
// written together with any formatting or write error.
func WriteGo(w io.Writer, pkg string, b []byte) (n int, err error) {
	src := []byte(fmt.Sprintf(header, pkg))
	src = append(src, b...)
	formatted, err := format.Source(src)
	if err != nil {
		// Print the generated code even in case of an error so that the
		// returned error can be meaningfully interpreted.
		n, _ = w.Write(src)
		return n, err
	}
	return w.Write(formatted)
}
|  | ||||
| // Repackage rewrites a Go file from belonging to package main to belonging to | ||||
| // the given package. | ||||
| func Repackage(inFile, outFile, pkg string) { | ||||
| 	src, err := ioutil.ReadFile(inFile) | ||||
| 	if err != nil { | ||||
| 		log.Fatalf("reading %s: %v", inFile, err) | ||||
| 	} | ||||
| 	const toDelete = "package main\n\n" | ||||
| 	i := bytes.Index(src, []byte(toDelete)) | ||||
| 	if i < 0 { | ||||
| 		log.Fatalf("Could not find %q in %s.", toDelete, inFile) | ||||
| 	} | ||||
| 	w := &bytes.Buffer{} | ||||
| 	w.Write(src[i+len(toDelete):]) | ||||
| 	WriteGoFile(outFile, pkg, w.Bytes()) | ||||
| } | ||||
							
								
								
									
										58
									
								
								vendor/golang.org/x/text/internal/triegen/compact.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										58
									
								
								vendor/golang.org/x/text/internal/triegen/compact.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,58 @@ | ||||
| // Copyright 2014 The Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| package triegen | ||||
|  | ||||
| // This file defines Compacter and its implementations. | ||||
|  | ||||
| import "io" | ||||
|  | ||||
// A Compacter generates an alternative, more space-efficient way to store a
// trie value block. A trie value block holds all possible values for the last
// byte of a UTF-8 encoded rune. Excluding ASCII characters, a trie value block
// always has 64 values, as a UTF-8 encoding ends with a byte in [0x80, 0xC0).
// simpleCompacter below is the verbatim, uncompressed implementation.
type Compacter interface {
	// Size returns whether the Compacter could encode the given block as well
	// as its size in case it can. len(v) is always 64.
	Size(v []uint64) (sz int, ok bool)

	// Store stores the block using the Compacter's compression method.
	// It returns a handle with which the block can be retrieved.
	// len(v) is always 64.
	Store(v []uint64) uint32

	// Print writes the data structures associated to the given store to w.
	Print(w io.Writer) error

	// Handler returns the name of a function that gets called during trie
	// lookup for blocks generated by the Compacter. The function should be of
	// the form func (n uint32, b byte) uint64, where n is the index returned by
	// the Compacter's Store method and b is the last byte of the UTF-8
	// encoding, where 0x80 <= b < 0xC0, for which to do the lookup in the
	// block.
	Handler() string
}
|  | ||||
// simpleCompacter is the default Compacter used by builder. It implements a
// normal trie block, storing value blocks verbatim on the builder.
type simpleCompacter builder
|  | ||||
| func (b *simpleCompacter) Size([]uint64) (sz int, ok bool) { | ||||
| 	return blockSize * b.ValueSize, true | ||||
| } | ||||
|  | ||||
| func (b *simpleCompacter) Store(v []uint64) uint32 { | ||||
| 	h := uint32(len(b.ValueBlocks) - blockOffset) | ||||
| 	b.ValueBlocks = append(b.ValueBlocks, v) | ||||
| 	return h | ||||
| } | ||||
|  | ||||
| func (b *simpleCompacter) Print(io.Writer) error { | ||||
| 	// Structures are printed in print.go. | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
// Handler always panics: per its message, lookup for this default Compacter
// is special-cased rather than dispatched through a handler function.
func (b *simpleCompacter) Handler() string {
	panic("Handler should be special-cased for this Compacter")
}
							
								
								
									
										251
									
								
								vendor/golang.org/x/text/internal/triegen/print.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										251
									
								
								vendor/golang.org/x/text/internal/triegen/print.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,251 @@ | ||||
| // Copyright 2014 The Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| package triegen | ||||
|  | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"fmt" | ||||
| 	"io" | ||||
| 	"strings" | ||||
| 	"text/template" | ||||
| ) | ||||
|  | ||||
// print writes all the data structures as well as the code necessary to use the
// trie to w.
func (b *builder) print(w io.Writer) error {
	// Precompute the table statistics referenced by the templates.
	b.Stats.NValueEntries = len(b.ValueBlocks) * blockSize
	b.Stats.NValueBytes = len(b.ValueBlocks) * blockSize * b.ValueSize
	b.Stats.NIndexEntries = len(b.IndexBlocks) * blockSize
	b.Stats.NIndexBytes = len(b.IndexBlocks) * blockSize * b.IndexSize
	b.Stats.NHandleBytes = len(b.Trie) * 2 * b.IndexSize

	// If we only have one root trie, all starter blocks are at position 0 and
	// we can access the arrays directly.
	if len(b.Trie) == 1 {
		// At this point we cannot refer to the generated tables directly.
		b.ASCIIBlock = b.Name + "Values"
		b.StarterBlock = b.Name + "Index"
	} else {
		// Otherwise we need to have explicit starter indexes in the trie
		// structure.
		b.ASCIIBlock = "t.ascii"
		b.StarterBlock = "t.utf8Start"
	}

	// Emit a []byte lookup variant, then a string variant, then the tables.
	b.SourceType = "[]byte"
	if err := lookupGen.Execute(w, b); err != nil {
		return err
	}

	b.SourceType = "string"
	if err := lookupGen.Execute(w, b); err != nil {
		return err
	}

	if err := trieGen.Execute(w, b); err != nil {
		return err
	}

	// Let each registered compaction print its auxiliary structures.
	for _, c := range b.Compactions {
		if err := c.c.Print(w); err != nil {
			return err
		}
	}

	return nil
}
|  | ||||
| func printValues(n int, values []uint64) string { | ||||
| 	w := &bytes.Buffer{} | ||||
| 	boff := n * blockSize | ||||
| 	fmt.Fprintf(w, "\t// Block %#x, offset %#x", n, boff) | ||||
| 	var newline bool | ||||
| 	for i, v := range values { | ||||
| 		if i%6 == 0 { | ||||
| 			newline = true | ||||
| 		} | ||||
| 		if v != 0 { | ||||
| 			if newline { | ||||
| 				fmt.Fprintf(w, "\n") | ||||
| 				newline = false | ||||
| 			} | ||||
| 			fmt.Fprintf(w, "\t%#02x:%#04x, ", boff+i, v) | ||||
| 		} | ||||
| 	} | ||||
| 	return w.String() | ||||
| } | ||||
|  | ||||
// printIndex formats one index block of the trie as sparse Go source. nr is
// the block number; n holds the child pointers. Each non-nil child's entry is
// its compaction offset plus its index within that compaction; zero entries
// are elided.
func printIndex(b *builder, nr int, n *node) string {
	w := &bytes.Buffer{}
	boff := nr * blockSize
	fmt.Fprintf(w, "\t// Block %#x, offset %#x", nr, boff)
	var newline bool
	for i, c := range n.children {
		if i%8 == 0 {
			newline = true // start a fresh line for every eighth slot
		}
		if c != nil {
			v := b.Compactions[c.index.compaction].Offset + uint32(c.index.index)
			if v != 0 {
				if newline {
					fmt.Fprintf(w, "\n")
					newline = false
				}
				fmt.Fprintf(w, "\t%#02x:%#02x, ", boff+i, v)
			}
		}
	}
	return w.String()
}
|  | ||||
// trieGen renders the trie type, tables, and lookupValue method; lookupGen
// renders the lookup/lookupString entry points. The registered funcs are the
// helpers used inside the templates below.
var (
	trieGen = template.Must(template.New("trie").Funcs(template.FuncMap{
		"printValues": printValues,
		"printIndex":  printIndex,
		"title":       strings.Title,
		"dec":         func(x int) int { return x - 1 },
		"psize": func(n int) string {
			return fmt.Sprintf("%d bytes (%.2f KiB)", n, float64(n)/1024)
		},
	}).Parse(trieTemplate))
	lookupGen = template.Must(template.New("lookup").Parse(lookupTemplate))
)
|  | ||||
// TODO: consider the return type of lookup. It could be uint64, even if the
// internal value type is smaller. We will have to verify this with the
// performance of unicode/norm, which is very sensitive to such changes.
//
// trieTemplate emits the trie type, its constructor, lookupValue, and the
// Values/Index tables. $multi switches between the single-root layout
// (direct array access) and the multi-root layout (per-trie handles).
const trieTemplate = `{{$b := .}}{{$multi := gt (len .Trie) 1}}
// {{.Name}}Trie. Total size: {{psize .Size}}. Checksum: {{printf "%08x" .Checksum}}.
type {{.Name}}Trie struct { {{if $multi}}
	ascii []{{.ValueType}} // index for ASCII bytes
	utf8Start  []{{.IndexType}} // index for UTF-8 bytes >= 0xC0
{{end}}}

func new{{title .Name}}Trie(i int) *{{.Name}}Trie { {{if $multi}}
	h := {{.Name}}TrieHandles[i]
	return &{{.Name}}Trie{ {{.Name}}Values[uint32(h.ascii)<<6:], {{.Name}}Index[uint32(h.multi)<<6:] }
}

type {{.Name}}TrieHandle struct {
	ascii, multi {{.IndexType}}
}

// {{.Name}}TrieHandles: {{len .Trie}} handles, {{.Stats.NHandleBytes}} bytes
var {{.Name}}TrieHandles = [{{len .Trie}}]{{.Name}}TrieHandle{
{{range .Trie}}	{ {{.ASCIIIndex}}, {{.StarterIndex}} }, // {{printf "%08x" .Checksum}}: {{.Name}}
{{end}}}{{else}}
	return &{{.Name}}Trie{}
}
{{end}}
// lookupValue determines the type of block n and looks up the value for b.
func (t *{{.Name}}Trie) lookupValue(n uint32, b byte) {{.ValueType}}{{$last := dec (len .Compactions)}} {
	switch { {{range $i, $c := .Compactions}}
		{{if eq $i $last}}default{{else}}case n < {{$c.Cutoff}}{{end}}:{{if ne $i 0}}
			n -= {{$c.Offset}}{{end}}
			return {{print $b.ValueType}}({{$c.Handler}}){{end}}
	}
}

// {{.Name}}Values: {{len .ValueBlocks}} blocks, {{.Stats.NValueEntries}} entries, {{.Stats.NValueBytes}} bytes
// The third block is the zero block.
var {{.Name}}Values = [{{.Stats.NValueEntries}}]{{.ValueType}} {
{{range $i, $v := .ValueBlocks}}{{printValues $i $v}}
{{end}}}

// {{.Name}}Index: {{len .IndexBlocks}} blocks, {{.Stats.NIndexEntries}} entries, {{.Stats.NIndexBytes}} bytes
// Block 0 is the zero block.
var {{.Name}}Index = [{{.Stats.NIndexEntries}}]{{.IndexType}} {
{{range $i, $v := .IndexBlocks}}{{printIndex $b $i $v}}
{{end}}}
`
|  | ||||
| // TODO: consider allowing zero-length strings after evaluating performance with | ||||
| // unicode/norm. | ||||
| const lookupTemplate = ` | ||||
| // lookup{{if eq .SourceType "string"}}String{{end}} returns the trie value for the first UTF-8 encoding in s and | ||||
| // the width in bytes of this encoding. The size will be 0 if s does not | ||||
| // hold enough bytes to complete the encoding. len(s) must be greater than 0. | ||||
| func (t *{{.Name}}Trie) lookup{{if eq .SourceType "string"}}String{{end}}(s {{.SourceType}}) (v {{.ValueType}}, sz int) { | ||||
| 	c0 := s[0] | ||||
| 	switch { | ||||
| 	case c0 < 0x80: // is ASCII | ||||
| 		return {{.ASCIIBlock}}[c0], 1 | ||||
| 	case c0 < 0xC2: | ||||
| 		return 0, 1  // Illegal UTF-8: not a starter, not ASCII. | ||||
| 	case c0 < 0xE0: // 2-byte UTF-8 | ||||
| 		if len(s) < 2 { | ||||
| 			return 0, 0 | ||||
| 		} | ||||
| 		i := {{.StarterBlock}}[c0] | ||||
| 		c1 := s[1] | ||||
| 		if c1 < 0x80 || 0xC0 <= c1 { | ||||
| 			return 0, 1 // Illegal UTF-8: not a continuation byte. | ||||
| 		} | ||||
| 		return t.lookupValue(uint32(i), c1), 2 | ||||
| 	case c0 < 0xF0: // 3-byte UTF-8 | ||||
| 		if len(s) < 3 { | ||||
| 			return 0, 0 | ||||
| 		} | ||||
| 		i := {{.StarterBlock}}[c0] | ||||
| 		c1 := s[1] | ||||
| 		if c1 < 0x80 || 0xC0 <= c1 { | ||||
| 			return 0, 1 // Illegal UTF-8: not a continuation byte. | ||||
| 		} | ||||
| 		o := uint32(i)<<6 + uint32(c1) | ||||
| 		i = {{.Name}}Index[o] | ||||
| 		c2 := s[2] | ||||
| 		if c2 < 0x80 || 0xC0 <= c2 { | ||||
| 			return 0, 2 // Illegal UTF-8: not a continuation byte. | ||||
| 		} | ||||
| 		return t.lookupValue(uint32(i), c2), 3 | ||||
| 	case c0 < 0xF8: // 4-byte UTF-8 | ||||
| 		if len(s) < 4 { | ||||
| 			return 0, 0 | ||||
| 		} | ||||
| 		i := {{.StarterBlock}}[c0] | ||||
| 		c1 := s[1] | ||||
| 		if c1 < 0x80 || 0xC0 <= c1 { | ||||
| 			return 0, 1 // Illegal UTF-8: not a continuation byte. | ||||
| 		} | ||||
| 		o := uint32(i)<<6 + uint32(c1) | ||||
| 		i = {{.Name}}Index[o] | ||||
| 		c2 := s[2] | ||||
| 		if c2 < 0x80 || 0xC0 <= c2 { | ||||
| 			return 0, 2 // Illegal UTF-8: not a continuation byte. | ||||
| 		} | ||||
| 		o = uint32(i)<<6 + uint32(c2) | ||||
| 		i = {{.Name}}Index[o] | ||||
| 		c3 := s[3] | ||||
| 		if c3 < 0x80 || 0xC0 <= c3 { | ||||
| 			return 0, 3 // Illegal UTF-8: not a continuation byte. | ||||
| 		} | ||||
| 		return t.lookupValue(uint32(i), c3), 4 | ||||
| 	} | ||||
| 	// Illegal rune | ||||
| 	return 0, 1 | ||||
| } | ||||
|  | ||||
| // lookup{{if eq .SourceType "string"}}String{{end}}Unsafe returns the trie value for the first UTF-8 encoding in s. | ||||
| // s must start with a full and valid UTF-8 encoded rune. | ||||
| func (t *{{.Name}}Trie) lookup{{if eq .SourceType "string"}}String{{end}}Unsafe(s {{.SourceType}}) {{.ValueType}} { | ||||
| 	c0 := s[0] | ||||
| 	if c0 < 0x80 { // is ASCII | ||||
| 		return {{.ASCIIBlock}}[c0] | ||||
| 	} | ||||
| 	i := {{.StarterBlock}}[c0] | ||||
| 	if c0 < 0xE0 { // 2-byte UTF-8 | ||||
| 		return t.lookupValue(uint32(i), s[1]) | ||||
| 	} | ||||
| 	i = {{.Name}}Index[uint32(i)<<6+uint32(s[1])] | ||||
| 	if c0 < 0xF0 { // 3-byte UTF-8 | ||||
| 		return t.lookupValue(uint32(i), s[2]) | ||||
| 	} | ||||
| 	i = {{.Name}}Index[uint32(i)<<6+uint32(s[2])] | ||||
| 	if c0 < 0xF8 { // 4-byte UTF-8 | ||||
| 		return t.lookupValue(uint32(i), s[3]) | ||||
| 	} | ||||
| 	return 0 | ||||
| } | ||||
| ` | ||||
							
								
								
									
										494
									
								
								vendor/golang.org/x/text/internal/triegen/triegen.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										494
									
								
								vendor/golang.org/x/text/internal/triegen/triegen.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,494 @@ | ||||
| // Copyright 2014 The Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| // Package triegen implements a code generator for a trie for associating | ||||
| // unsigned integer values with UTF-8 encoded runes. | ||||
| // | ||||
| // Many of the go.text packages use tries for storing per-rune information.  A | ||||
| // trie is especially useful if many of the runes have the same value. If this | ||||
| // is the case, many blocks can be expected to be shared allowing for | ||||
| // information on many runes to be stored in little space. | ||||
| // | ||||
| // As most of the lookups are done directly on []byte slices, the tries use the | ||||
| // UTF-8 bytes directly for the lookup. This saves a conversion from UTF-8 to | ||||
| // runes and contributes a little bit to better performance. It also naturally | ||||
| // provides a fast path for ASCII. | ||||
| // | ||||
| // Space is also an issue. There are many code points defined in Unicode and as | ||||
| // a result tables can get quite large. So every byte counts. The triegen | ||||
| // package automatically chooses the smallest integer values to represent the | ||||
| // tables. Compacters allow further compression of the trie by allowing for | ||||
| // alternative representations of individual trie blocks. | ||||
| // | ||||
| // triegen allows generating multiple tries as a single structure. This is | ||||
| // useful when, for example, one wants to generate tries for several languages | ||||
| // that have a lot of values in common. Some existing libraries for | ||||
| // internationalization store all per-language data as a dynamically loadable | ||||
| // chunk. The go.text packages are designed with the assumption that the user | ||||
| // typically wants to compile in support for all supported languages, in line | ||||
| // with the approach common to Go to create a single standalone binary. The | ||||
| // multi-root trie approach can give significant storage savings in this | ||||
| // scenario. | ||||
| // | ||||
| // triegen generates both tables and code. The code is optimized to use the | ||||
| // automatically chosen data types. The following code is generated for a Trie | ||||
| // or multiple Tries named "foo": | ||||
| //	- type fooTrie | ||||
| //		The trie type. | ||||
| // | ||||
| //	- func newFooTrie(x int) *fooTrie | ||||
| //		Trie constructor, where x is the index of the trie passed to Gen. | ||||
| // | ||||
| //	- func (t *fooTrie) lookup(s []byte) (v uintX, sz int) | ||||
| //		The lookup method, where uintX is automatically chosen. | ||||
| // | ||||
| //	- func lookupString, lookupUnsafe and lookupStringUnsafe | ||||
| //		Variants of the above. | ||||
| // | ||||
| //	- var fooValues and fooIndex and any tables generated by Compacters. | ||||
| //		The core trie data. | ||||
| // | ||||
| //	- var fooTrieHandles | ||||
| //		Indexes of starter blocks in case of multiple trie roots. | ||||
| // | ||||
| // It is recommended that users test the generated trie by checking the returned | ||||
// value for every rune. Such exhaustive tests are possible as the number of
| // runes in Unicode is limited. | ||||
| package triegen // import "golang.org/x/text/internal/triegen" | ||||
|  | ||||
| // TODO: Arguably, the internally optimized data types would not have to be | ||||
| // exposed in the generated API. We could also investigate not generating the | ||||
| // code, but using it through a package. We would have to investigate the impact | ||||
| // on performance of making such change, though. For packages like unicode/norm, | ||||
| // small changes like this could tank performance. | ||||
|  | ||||
| import ( | ||||
| 	"encoding/binary" | ||||
| 	"fmt" | ||||
| 	"hash/crc64" | ||||
| 	"io" | ||||
| 	"log" | ||||
| 	"unicode/utf8" | ||||
| ) | ||||
|  | ||||
// builder builds a set of tries for associating values with runes. The set of
// tries can share common index and value blocks.
type builder struct {
	// Name is the prefix used for the generated types, variables, and methods.
	Name string

	// ValueType is the type of the trie values looked up.
	ValueType string

	// ValueSize is the byte size of the ValueType.
	ValueSize int

	// IndexType is the type of trie index values used for all UTF-8 bytes of
	// a rune except the last one.
	IndexType string

	// IndexSize is the byte size of the IndexType.
	IndexSize int

	// SourceType is used when generating the lookup functions. If the user
	// requests StringSupport, all lookup functions will be generated for
	// string input as well.
	SourceType string

	// Trie holds the roots of the tries being generated together.
	Trie []*Trie

	// IndexBlocks and ValueBlocks are the deduplicated blocks shared by all
	// tries; Compactions holds the alternative block representations.
	IndexBlocks []*node
	ValueBlocks [][]uint64
	Compactions []compaction
	Checksum    uint64

	// ASCIIBlock and StarterBlock are expressions expanded by the lookup
	// templates to index the first-byte tables.
	ASCIIBlock   string
	StarterBlock string

	// The *Idx maps key block hashes to their assigned positions so that
	// identical blocks are stored only once.
	indexBlockIdx map[uint64]int
	valueBlockIdx map[uint64]nodeIndex
	asciiBlockIdx map[uint64]int

	// Stats are used to fill out the template.
	Stats struct {
		NValueEntries int
		NValueBytes   int
		NIndexEntries int
		NIndexBytes   int
		NHandleBytes  int
	}

	// err records the first error encountered during building (see setError).
	err error
}
|  | ||||
| // A nodeIndex encodes the index of a node, which is defined by the compaction | ||||
| // which stores it and an index within the compaction. For internal nodes, the | ||||
| // compaction is always 0. | ||||
| type nodeIndex struct { | ||||
| 	compaction int | ||||
| 	index      int | ||||
| } | ||||
|  | ||||
| // compaction keeps track of stats used for the compaction. | ||||
| type compaction struct { | ||||
| 	c         Compacter | ||||
| 	blocks    []*node | ||||
| 	maxHandle uint32 | ||||
| 	totalSize int | ||||
|  | ||||
| 	// Used by template-based generator and thus exported. | ||||
| 	Cutoff  uint32 | ||||
| 	Offset  uint32 | ||||
| 	Handler string | ||||
| } | ||||
|  | ||||
| func (b *builder) setError(err error) { | ||||
| 	if b.err == nil { | ||||
| 		b.err = err | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // An Option can be passed to Gen. | ||||
| type Option func(b *builder) error | ||||
|  | ||||
| // Compact configures the trie generator to use the given Compacter. | ||||
| func Compact(c Compacter) Option { | ||||
| 	return func(b *builder) error { | ||||
| 		b.Compactions = append(b.Compactions, compaction{ | ||||
| 			c:       c, | ||||
| 			Handler: c.Handler() + "(n, b)"}) | ||||
| 		return nil | ||||
| 	} | ||||
| } | ||||
|  | ||||
// Gen writes Go code for a shared trie lookup structure to w for the given
// Tries. The generated trie type will be called nameTrie. newNameTrie(x) will
// return the *nameTrie for tries[x]. A value can be looked up by using one of
// the various lookup methods defined on nameTrie. It returns the table size of
// the generated trie.
func Gen(w io.Writer, name string, tries []*Trie, opts ...Option) (sz int, err error) {
	// The index contains two dummy blocks, followed by the zero block. The zero
	// block is at offset 0x80, so that the offset for the zero block for
	// continuation bytes is 0.
	b := &builder{
		Name:        name,
		Trie:        tries,
		IndexBlocks: []*node{{}, {}, {}},
		Compactions: []compaction{{
			Handler: name + "Values[n<<6+uint32(b)]",
		}},
		// The 0 key in indexBlockIdx and valueBlockIdx is the hash of the zero
		// block.
		indexBlockIdx: map[uint64]int{0: 0},
		valueBlockIdx: map[uint64]nodeIndex{0: {}},
		asciiBlockIdx: map[uint64]int{},
	}
	// Compaction 0 is the default, uncompacted value-table representation;
	// Compacters added via options are appended after it.
	b.Compactions[0].c = (*simpleCompacter)(b)

	// Apply the options before building so they can affect block layout.
	for _, f := range opts {
		if err := f(b); err != nil {
			return 0, err
		}
	}
	b.build()
	if b.err != nil {
		return 0, b.err
	}
	if err = b.print(w); err != nil {
		return 0, err
	}
	return b.Size(), nil
}
|  | ||||
| // A Trie represents a single root node of a trie. A builder may build several | ||||
| // overlapping tries at once. | ||||
| type Trie struct { | ||||
| 	root *node | ||||
|  | ||||
| 	hiddenTrie | ||||
| } | ||||
|  | ||||
| // hiddenTrie contains values we want to be visible to the template generator, | ||||
| // but hidden from the API documentation. | ||||
| type hiddenTrie struct { | ||||
| 	Name         string | ||||
| 	Checksum     uint64 | ||||
| 	ASCIIIndex   int | ||||
| 	StarterIndex int | ||||
| } | ||||
|  | ||||
| // NewTrie returns a new trie root. | ||||
| func NewTrie(name string) *Trie { | ||||
| 	return &Trie{ | ||||
| 		&node{ | ||||
| 			children: make([]*node, blockSize), | ||||
| 			values:   make([]uint64, utf8.RuneSelf), | ||||
| 		}, | ||||
| 		hiddenTrie{Name: name}, | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // Gen is a convenience wrapper around the Gen func passing t as the only trie | ||||
| // and uses the name passed to NewTrie. It returns the size of the generated | ||||
| // tables. | ||||
| func (t *Trie) Gen(w io.Writer, opts ...Option) (sz int, err error) { | ||||
| 	return Gen(w, t.Name, []*Trie{t}, opts...) | ||||
| } | ||||
|  | ||||
| // node is a node of the intermediate trie structure. | ||||
| type node struct { | ||||
| 	// children holds this node's children. It is always of length 64. | ||||
| 	// A child node may be nil. | ||||
| 	children []*node | ||||
|  | ||||
| 	// values contains the values of this node. If it is non-nil, this node is | ||||
| 	// either a root or leaf node: | ||||
| 	// For root nodes, len(values) == 128 and it maps the bytes in [0x00, 0x7F]. | ||||
| 	// For leaf nodes, len(values) ==  64 and it maps the bytes in [0x80, 0xBF]. | ||||
| 	values []uint64 | ||||
|  | ||||
| 	index nodeIndex | ||||
| } | ||||
|  | ||||
// Insert associates value with the given rune. Insert will panic if a non-zero
// value is passed for an invalid rune.
func (t *Trie) Insert(r rune, value uint64) {
	// Zero values need not be stored: lookups of absent entries yield 0.
	if value == 0 {
		return
	}
	s := string(r)
	if []rune(s)[0] != r && value != 0 {
		// Note: The UCD tables will always assign what amounts to a zero value
		// to a surrogate. Allowing a zero value for an illegal rune allows
		// users to iterate over [0..MaxRune] without having to explicitly
		// exclude surrogates, which would be tedious.
		panic(fmt.Sprintf("triegen: non-zero value for invalid rune %U", r))
	}
	if len(s) == 1 {
		// It is a root node value (ASCII).
		t.root.values[s[0]] = value
		return
	}

	// Walk the UTF-8 bytes of the rune, creating intermediate nodes on demand.
	n := t.root
	for ; len(s) > 1; s = s[1:] {
		if n.children == nil {
			n.children = make([]*node, blockSize)
		}
		// The low 6 bits of each UTF-8 byte select the child within a block.
		p := s[0] % blockSize
		c := n.children[p]
		if c == nil {
			c = &node{}
			n.children[p] = c
		}
		if len(s) > 2 && c.values != nil {
			log.Fatalf("triegen: insert(%U): found internal node with values", r)
		}
		n = c
	}
	if n.values == nil {
		n.values = make([]uint64, blockSize)
	}
	if n.children != nil {
		log.Fatalf("triegen: insert(%U): found leaf node that also has child nodes", r)
	}
	// The last byte is a continuation byte in [0x80, 0xBF]; offset it into the
	// 64-entry leaf block.
	n.values[s[0]-0x80] = value
}
|  | ||||
// Size returns the number of bytes the generated trie will take to store. It
// needs to be exported as it is used in the templates.
func (b *builder) Size() int {
	// Index blocks.
	sz := len(b.IndexBlocks) * blockSize * b.IndexSize

	// Skip the first compaction, which represents the normal value blocks, as
	// its totalSize does not account for the ASCII blocks, which are managed
	// separately.
	sz += len(b.ValueBlocks) * blockSize * b.ValueSize
	// Add the sizes of the alternative (compacted) representations.
	for _, c := range b.Compactions[1:] {
		sz += c.totalSize
	}

	// TODO: this computation does not account for the fixed overhead of a using
	// a compaction, either code or data. As for data, though, the typical
	// overhead of data is in the order of bytes (2 bytes for cases). Further,
	// the savings of using a compaction should anyway be substantial for it to
	// be worth it.

	// For multi-root tries, we also need to account for the handles.
	if len(b.Trie) > 1 {
		sz += 2 * b.IndexSize * len(b.Trie)
	}
	return sz
}
|  | ||||
// build performs the full layout pass: it sizes the value type, allocates the
// blocks for every trie, computes the Compacter handle ranges, and finally
// sizes the index type.
func (b *builder) build() {
	// Compute the sizes of the values.
	var vmax uint64
	for _, t := range b.Trie {
		vmax = maxValue(t.root, vmax)
	}
	b.ValueType, b.ValueSize = getIntType(vmax)

	// Compute all block allocations.
	// TODO: first compute the ASCII blocks for all tries and then the other
	// nodes. ASCII blocks are more restricted in placement, as they require two
	// blocks to be placed consecutively. Processing them first may improve
	// sharing (at least one zero block can be expected to be saved.)
	for _, t := range b.Trie {
		b.Checksum += b.buildTrie(t)
	}

	// Compute the offsets for all the Compacters. Each compaction occupies the
	// half-open handle range [Offset, Cutoff).
	offset := uint32(0)
	for i := range b.Compactions {
		c := &b.Compactions[i]
		c.Offset = offset
		offset += c.maxHandle + 1
		c.Cutoff = offset
	}

	// Compute the sizes of indexes.
	// TODO: different byte positions could have different sizes. So far we have
	// not found a case where this is beneficial.
	imax := uint64(b.Compactions[len(b.Compactions)-1].Cutoff)
	for _, ib := range b.IndexBlocks {
		if x := uint64(ib.index.index); x > imax {
			imax = x
		}
	}
	b.IndexType, b.IndexSize = getIntType(imax)
}
|  | ||||
| func maxValue(n *node, max uint64) uint64 { | ||||
| 	if n == nil { | ||||
| 		return max | ||||
| 	} | ||||
| 	for _, c := range n.children { | ||||
| 		max = maxValue(c, max) | ||||
| 	} | ||||
| 	for _, v := range n.values { | ||||
| 		if max < v { | ||||
| 			max = v | ||||
| 		} | ||||
| 	} | ||||
| 	return max | ||||
| } | ||||
|  | ||||
// getIntType returns the name and byte size of the smallest unsigned integer
// type that can represent v.
func getIntType(v uint64) (string, int) {
	if v < 1<<8 {
		return "uint8", 1
	}
	if v < 1<<16 {
		return "uint16", 2
	}
	if v < 1<<32 {
		return "uint32", 4
	}
	return "uint64", 8
}
|  | ||||
| const ( | ||||
| 	blockSize = 64 | ||||
|  | ||||
| 	// Subtract two blocks to offset 0x80, the first continuation byte. | ||||
| 	blockOffset = 2 | ||||
|  | ||||
| 	// Subtract three blocks to offset 0xC0, the first non-ASCII starter. | ||||
| 	rootBlockOffset = 3 | ||||
| ) | ||||
|  | ||||
| var crcTable = crc64.MakeTable(crc64.ISO) | ||||
|  | ||||
// buildTrie lays out the blocks for a single trie root: it registers (or
// shares) the two ASCII value blocks, recursively assigns all remaining
// blocks via computeOffsets, and returns the trie's checksum.
func (b *builder) buildTrie(t *Trie) uint64 {
	n := t.root

	// Get the ASCII offset. For the first trie, the ASCII block will be at
	// position 0.
	hasher := crc64.New(crcTable)
	binary.Write(hasher, binary.BigEndian, n.values)
	hash := hasher.Sum64()

	v, ok := b.asciiBlockIdx[hash]
	if !ok {
		v = len(b.ValueBlocks)
		b.asciiBlockIdx[hash] = v

		// The 128 ASCII values are stored as two consecutive 64-entry blocks.
		b.ValueBlocks = append(b.ValueBlocks, n.values[:blockSize], n.values[blockSize:])
		if v == 0 {
			// Add the zero block at position 2 so that it will be assigned a
			// zero reference in the lookup blocks.
			// TODO: always do this? This would allow us to remove a check from
			// the trie lookup, but at the expense of extra space. Analyze
			// performance for unicode/norm.
			b.ValueBlocks = append(b.ValueBlocks, make([]uint64, blockSize))
		}
	}
	t.ASCIIIndex = v

	// Compute remaining offsets.
	t.Checksum = b.computeOffsets(n, true)
	// We already subtracted the normal blockOffset from the index. Subtract the
	// difference for starter bytes.
	t.StarterIndex = n.index.index - (rootBlockOffset - blockOffset)
	return t.Checksum
}
|  | ||||
// computeOffsets recursively assigns block positions to n and its subtree,
// deduplicating identical blocks via their crc64 hash, and returns that hash.
func (b *builder) computeOffsets(n *node, root bool) uint64 {
	// For the first trie, the root lookup block will be at position 3, which is
	// the offset for UTF-8 non-ASCII starter bytes.
	first := len(b.IndexBlocks) == rootBlockOffset
	if first {
		b.IndexBlocks = append(b.IndexBlocks, n)
	}

	// We special-case the cases where all values recursively are 0. This allows
	// for the use of a zero block to which all such values can be directed.
	hash := uint64(0)
	if n.children != nil || n.values != nil {
		hasher := crc64.New(crcTable)
		// Depth-first: each child's hash is computed (and its block placed)
		// before being folded into this node's hash.
		for _, c := range n.children {
			var v uint64
			if c != nil {
				v = b.computeOffsets(c, false)
			}
			binary.Write(hasher, binary.BigEndian, v)
		}
		binary.Write(hasher, binary.BigEndian, n.values)
		hash = hasher.Sum64()
	}

	if first {
		b.indexBlockIdx[hash] = rootBlockOffset - blockOffset
	}

	// Compacters don't apply to internal nodes.
	if n.children != nil {
		v, ok := b.indexBlockIdx[hash]
		if !ok {
			// A new, unshared index block; register it so later identical
			// blocks can reuse it.
			v = len(b.IndexBlocks) - blockOffset
			b.IndexBlocks = append(b.IndexBlocks, n)
			b.indexBlockIdx[hash] = v
		}
		n.index = nodeIndex{0, v}
	} else {
		h, ok := b.valueBlockIdx[hash]
		if !ok {
			// Pick the compaction that stores this leaf block in the fewest
			// bytes; compaction 0 (the plain value table) is the fallback.
			bestI, bestSize := 0, blockSize*b.ValueSize
			for i, c := range b.Compactions[1:] {
				if sz, ok := c.c.Size(n.values); ok && bestSize > sz {
					bestI, bestSize = i+1, sz
				}
			}
			c := &b.Compactions[bestI]
			c.totalSize += bestSize
			v := c.c.Store(n.values)
			if c.maxHandle < v {
				c.maxHandle = v
			}
			h = nodeIndex{bestI, int(v)}
			b.valueBlockIdx[hash] = h
		}
		n.index = h
	}
	return hash
}
							
								
								
									
										376
									
								
								vendor/golang.org/x/text/internal/ucd/ucd.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										376
									
								
								vendor/golang.org/x/text/internal/ucd/ucd.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,376 @@ | ||||
| // Copyright 2014 The Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| // Package ucd provides a parser for Unicode Character Database files, the | ||||
| // format of which is defined in http://www.unicode.org/reports/tr44/. See | ||||
| // http://www.unicode.org/Public/UCD/latest/ucd/ for example files. | ||||
| // | ||||
| // It currently does not support substitutions of missing fields. | ||||
| package ucd // import "golang.org/x/text/internal/ucd" | ||||
|  | ||||
| import ( | ||||
| 	"bufio" | ||||
| 	"bytes" | ||||
| 	"errors" | ||||
| 	"io" | ||||
| 	"log" | ||||
| 	"regexp" | ||||
| 	"strconv" | ||||
| 	"strings" | ||||
| ) | ||||
|  | ||||
| // UnicodeData.txt fields. | ||||
| const ( | ||||
| 	CodePoint = iota | ||||
| 	Name | ||||
| 	GeneralCategory | ||||
| 	CanonicalCombiningClass | ||||
| 	BidiClass | ||||
| 	DecompMapping | ||||
| 	DecimalValue | ||||
| 	DigitValue | ||||
| 	NumericValue | ||||
| 	BidiMirrored | ||||
| 	Unicode1Name | ||||
| 	ISOComment | ||||
| 	SimpleUppercaseMapping | ||||
| 	SimpleLowercaseMapping | ||||
| 	SimpleTitlecaseMapping | ||||
| ) | ||||
|  | ||||
| // Parse calls f for each entry in the given reader of a UCD file. It will close | ||||
| // the reader upon return. It will call log.Fatal if any error occurred. | ||||
| // | ||||
| // This implements the most common usage pattern of using Parser. | ||||
| func Parse(r io.ReadCloser, f func(p *Parser)) { | ||||
| 	defer r.Close() | ||||
|  | ||||
| 	p := New(r) | ||||
| 	for p.Next() { | ||||
| 		f(p) | ||||
| 	} | ||||
| 	if err := p.Err(); err != nil { | ||||
| 		r.Close() // os.Exit will cause defers not to be called. | ||||
| 		log.Fatal(err) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // An Option is used to configure a Parser. | ||||
| type Option func(p *Parser) | ||||
|  | ||||
| func keepRanges(p *Parser) { | ||||
| 	p.keepRanges = true | ||||
| } | ||||
|  | ||||
| var ( | ||||
| 	// KeepRanges prevents the expansion of ranges. The raw ranges can be | ||||
| 	// obtained by calling Range(0) on the parser. | ||||
| 	KeepRanges Option = keepRanges | ||||
| ) | ||||
|  | ||||
| // The Part option register a handler for lines starting with a '@'. The text | ||||
| // after a '@' is available as the first field. Comments are handled as usual. | ||||
| func Part(f func(p *Parser)) Option { | ||||
| 	return func(p *Parser) { | ||||
| 		p.partHandler = f | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // The CommentHandler option passes comments that are on a line by itself to | ||||
| // a given handler. | ||||
| func CommentHandler(f func(s string)) Option { | ||||
| 	return func(p *Parser) { | ||||
| 		p.commentHandler = f | ||||
| 	} | ||||
| } | ||||
|  | ||||
// A Parser parses Unicode Character Database (UCD) files.
type Parser struct {
	scanner *bufio.Scanner

	keepRanges bool // Don't expand rune ranges in field 0.

	// err holds the first error encountered; comment and field hold the
	// trailing comment and the ';'-separated fields of the current line.
	err     error
	comment []byte
	field   [][]byte
	// parsedRange is needed in case Range(0) is called more than once for one
	// field. In some cases this requires scanning ahead.
	parsedRange          bool
	rangeStart, rangeEnd rune

	// Optional callbacks installed via the Part and CommentHandler options.
	partHandler    func(p *Parser)
	commentHandler func(s string)
}
|  | ||||
| func (p *Parser) setError(err error) { | ||||
| 	if p.err == nil { | ||||
| 		p.err = err | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (p *Parser) getField(i int) []byte { | ||||
| 	if i >= len(p.field) { | ||||
| 		return nil | ||||
| 	} | ||||
| 	return p.field[i] | ||||
| } | ||||
|  | ||||
// Err returns a non-nil error if any error occurred during parsing. Errors
// are sticky: only the first error recorded is reported.
func (p *Parser) Err() error {
	return p.err
}
|  | ||||
| // New returns a Parser for the given Reader. | ||||
| func New(r io.Reader, o ...Option) *Parser { | ||||
| 	p := &Parser{ | ||||
| 		scanner: bufio.NewScanner(r), | ||||
| 	} | ||||
| 	for _, f := range o { | ||||
| 		f(p) | ||||
| 	} | ||||
| 	return p | ||||
| } | ||||
|  | ||||
// Next parses the next line in the file. It returns true if a line was parsed
// and false if it reached the end of the file.
func (p *Parser) Next() bool {
	// When expanding ranges, synthesize one entry per rune in the range
	// before consuming more input.
	if !p.keepRanges && p.rangeStart < p.rangeEnd {
		p.rangeStart++
		return true
	}
	// Reset per-line state.
	p.comment = nil
	p.field = p.field[:0]
	p.parsedRange = false

	for p.scanner.Scan() {
		b := p.scanner.Bytes()
		if len(b) == 0 {
			continue
		}
		if b[0] == '#' {
			// Whole-line comment: dispatch to the handler, if any, and skip.
			if p.commentHandler != nil {
				p.commentHandler(strings.TrimSpace(string(b[1:])))
			}
			continue
		}

		// Parse line
		if i := bytes.IndexByte(b, '#'); i != -1 {
			// Strip a trailing comment, keeping its text in p.comment.
			p.comment = bytes.TrimSpace(b[i+1:])
			b = b[:i]
		}
		if b[0] == '@' {
			// Part line: the text after '@' becomes the sole field for the
			// part handler; the line itself is not returned to the caller.
			if p.partHandler != nil {
				p.field = append(p.field, bytes.TrimSpace(b[1:]))
				p.partHandler(p)
				p.field = p.field[:0]
			}
			p.comment = nil
			continue
		}
		// Split the remainder into ';'-separated, whitespace-trimmed fields.
		for {
			i := bytes.IndexByte(b, ';')
			if i == -1 {
				p.field = append(p.field, bytes.TrimSpace(b))
				break
			}
			p.field = append(p.field, bytes.TrimSpace(b[:i]))
			b = b[i+1:]
		}
		if !p.keepRanges {
			p.rangeStart, p.rangeEnd = p.getRange(0)
		}
		return true
	}
	p.setError(p.scanner.Err())
	return false
}
|  | ||||
// parseRune parses b as a hexadecimal code point, accepting an optional
// "U+" prefix, and returns the resulting rune.
func parseRune(b []byte) (rune, error) {
	s := string(b)
	if len(s) > 2 && s[0] == 'U' && s[1] == '+' {
		s = s[2:]
	}
	x, err := strconv.ParseUint(s, 16, 32)
	return rune(x), err
}
|  | ||||
| func (p *Parser) parseRune(b []byte) rune { | ||||
| 	x, err := parseRune(b) | ||||
| 	p.setError(err) | ||||
| 	return x | ||||
| } | ||||
|  | ||||
| // Rune parses and returns field i as a rune. | ||||
| func (p *Parser) Rune(i int) rune { | ||||
| 	if i > 0 || p.keepRanges { | ||||
| 		return p.parseRune(p.getField(i)) | ||||
| 	} | ||||
| 	return p.rangeStart | ||||
| } | ||||
|  | ||||
| // Runes interprets and returns field i as a sequence of runes. | ||||
| func (p *Parser) Runes(i int) (runes []rune) { | ||||
| 	add := func(b []byte) { | ||||
| 		if b = bytes.TrimSpace(b); len(b) > 0 { | ||||
| 			runes = append(runes, p.parseRune(b)) | ||||
| 		} | ||||
| 	} | ||||
| 	for b := p.getField(i); ; { | ||||
| 		i := bytes.IndexByte(b, ' ') | ||||
| 		if i == -1 { | ||||
| 			add(b) | ||||
| 			break | ||||
| 		} | ||||
| 		add(b[:i]) | ||||
| 		b = b[i+1:] | ||||
| 	} | ||||
| 	return | ||||
| } | ||||
|  | ||||
| var ( | ||||
| 	errIncorrectLegacyRange = errors.New("ucd: unmatched <* First>") | ||||
|  | ||||
| 	// reRange matches one line of a legacy rune range. | ||||
| 	reRange = regexp.MustCompile("^([0-9A-F]*);<([^,]*), ([^>]*)>(.*)$") | ||||
| ) | ||||
|  | ||||
| // Range parses and returns field i as a rune range. A range is inclusive at | ||||
| // both ends. If the field only has one rune, first and last will be identical. | ||||
| // It supports the legacy format for ranges used in UnicodeData.txt. | ||||
| func (p *Parser) Range(i int) (first, last rune) { | ||||
| 	if !p.keepRanges { | ||||
| 		return p.rangeStart, p.rangeStart | ||||
| 	} | ||||
| 	return p.getRange(i) | ||||
| } | ||||
|  | ||||
// getRange parses field i as a rune range, handling both the "XXXX..YYYY"
// form and the legacy two-line "<Name, First>" / "<Name, Last>" form used in
// UnicodeData.txt.
func (p *Parser) getRange(i int) (first, last rune) {
	b := p.getField(i)
	if k := bytes.Index(b, []byte("..")); k != -1 {
		return p.parseRune(b[:k]), p.parseRune(b[k+2:])
	}
	// The first field may not be a rune, in which case we may ignore any error
	// and set the range as 0..0.
	x, err := parseRune(b)
	if err != nil {
		// Disable range parsing henceforth. This ensures that an error will be
		// returned if the user subsequently will try to parse this field as
		// a Rune.
		p.keepRanges = true
	}
	// Special case for UnicodeData that was retained for backwards compatibility.
	if i == 0 && len(p.field) > 1 && bytes.HasSuffix(p.field[1], []byte("First>")) {
		if p.parsedRange {
			// Already scanned ahead for this line; return the cached bounds.
			return p.rangeStart, p.rangeEnd
		}
		mf := reRange.FindStringSubmatch(p.scanner.Text())
		if mf == nil || !p.scanner.Scan() {
			p.setError(errIncorrectLegacyRange)
			return x, x
		}
		// Using Bytes would be more efficient here, but Text is a lot easier
		// and this is not a frequent case.
		ml := reRange.FindStringSubmatch(p.scanner.Text())
		// The second line must name the same range and be marked "Last".
		if ml == nil || mf[2] != ml[2] || ml[3] != "Last" || mf[4] != ml[4] {
			p.setError(errIncorrectLegacyRange)
			return x, x
		}
		p.rangeStart, p.rangeEnd = x, p.parseRune(p.scanner.Bytes()[:len(ml[1])])
		p.parsedRange = true
		return p.rangeStart, p.rangeEnd
	}
	return x, x
}
|  | ||||
// bools recognizes all valid UCD boolean values. Note that an empty field is
// treated as false.
var bools = map[string]bool{
	"":      false,
	"N":     false,
	"No":    false,
	"F":     false,
	"False": false,
	"Y":     true,
	"Yes":   true,
	"T":     true,
	"True":  true,
}
|  | ||||
| // Bool parses and returns field i as a boolean value. | ||||
| func (p *Parser) Bool(i int) bool { | ||||
| 	b := p.getField(i) | ||||
| 	for s, v := range bools { | ||||
| 		if bstrEq(b, s) { | ||||
| 			return v | ||||
| 		} | ||||
| 	} | ||||
| 	p.setError(strconv.ErrSyntax) | ||||
| 	return false | ||||
| } | ||||
|  | ||||
| // Int parses and returns field i as an integer value. | ||||
| func (p *Parser) Int(i int) int { | ||||
| 	x, err := strconv.ParseInt(string(p.getField(i)), 10, 64) | ||||
| 	p.setError(err) | ||||
| 	return int(x) | ||||
| } | ||||
|  | ||||
| // Uint parses and returns field i as an unsigned integer value. | ||||
| func (p *Parser) Uint(i int) uint { | ||||
| 	x, err := strconv.ParseUint(string(p.getField(i)), 10, 64) | ||||
| 	p.setError(err) | ||||
| 	return uint(x) | ||||
| } | ||||
|  | ||||
| // Float parses and returns field i as a decimal value. | ||||
| func (p *Parser) Float(i int) float64 { | ||||
| 	x, err := strconv.ParseFloat(string(p.getField(i)), 64) | ||||
| 	p.setError(err) | ||||
| 	return x | ||||
| } | ||||
|  | ||||
| // String parses and returns field i as a string value. | ||||
| func (p *Parser) String(i int) string { | ||||
| 	return string(p.getField(i)) | ||||
| } | ||||
|  | ||||
| // Strings parses and returns field i as a space-separated list of strings. | ||||
| func (p *Parser) Strings(i int) []string { | ||||
| 	ss := strings.Split(string(p.getField(i)), " ") | ||||
| 	for i, s := range ss { | ||||
| 		ss[i] = strings.TrimSpace(s) | ||||
| 	} | ||||
| 	return ss | ||||
| } | ||||
|  | ||||
| // Comment returns the comments for the current line. | ||||
| func (p *Parser) Comment() string { | ||||
| 	return string(p.comment) | ||||
| } | ||||
|  | ||||
// errUndefinedEnum is reported by Enum when a field matches none of the
// allowed values.
var errUndefinedEnum = errors.New("ucd: undefined enum value")
|  | ||||
| // Enum interprets and returns field i as a value that must be one of the values | ||||
| // in enum. | ||||
| func (p *Parser) Enum(i int, enum ...string) string { | ||||
| 	b := p.getField(i) | ||||
| 	for _, s := range enum { | ||||
| 		if bstrEq(b, s) { | ||||
| 			return s | ||||
| 		} | ||||
| 	} | ||||
| 	p.setError(errUndefinedEnum) | ||||
| 	return "" | ||||
| } | ||||
|  | ||||
// bstrEq reports whether the byte slice b contains exactly the bytes of s.
// The conversion in the comparison is recognized by the compiler and does
// not allocate.
func bstrEq(b []byte, s string) bool {
	return string(b) == s
}
							
								
								
									
										342
									
								
								vendor/golang.org/x/text/secure/bidirule/bidirule.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										342
									
								
								vendor/golang.org/x/text/secure/bidirule/bidirule.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,342 @@ | ||||
| // Copyright 2016 The Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| // Package bidirule implements the Bidi Rule defined by RFC 5893. | ||||
| // | ||||
| // This package is under development. The API may change without notice and | ||||
| // without preserving backward compatibility. | ||||
| package bidirule | ||||
|  | ||||
| import ( | ||||
| 	"errors" | ||||
| 	"unicode/utf8" | ||||
|  | ||||
| 	"golang.org/x/text/transform" | ||||
| 	"golang.org/x/text/unicode/bidi" | ||||
| ) | ||||
|  | ||||
| // This file contains an implementation of RFC 5893: Right-to-Left Scripts for | ||||
| // Internationalized Domain Names for Applications (IDNA) | ||||
| // | ||||
| // A label is an individual component of a domain name.  Labels are usually | ||||
| // shown separated by dots; for example, the domain name "www.example.com" is | ||||
| // composed of three labels: "www", "example", and "com". | ||||
| // | ||||
| // An RTL label is a label that contains at least one character of class R, AL, | ||||
| // or AN. An LTR label is any label that is not an RTL label. | ||||
| // | ||||
| // A "Bidi domain name" is a domain name that contains at least one RTL label. | ||||
| // | ||||
| //  The following guarantees can be made based on the above: | ||||
| // | ||||
| //  o  In a domain name consisting of only labels that satisfy the rule, | ||||
| //     the requirements of Section 3 are satisfied.  Note that even LTR | ||||
| //     labels and pure ASCII labels have to be tested. | ||||
| // | ||||
| //  o  In a domain name consisting of only LDH labels (as defined in the | ||||
| //     Definitions document [RFC5890]) and labels that satisfy the rule, | ||||
| //     the requirements of Section 3 are satisfied as long as a label | ||||
| //     that starts with an ASCII digit does not come after a | ||||
| //     right-to-left label. | ||||
| // | ||||
| //  No guarantee is given for other combinations. | ||||
|  | ||||
// ErrInvalid indicates a label is invalid according to the Bidi Rule.
var ErrInvalid = errors.New("bidirule: failed Bidi Rule")

// ruleState is a state of the Bidi Rule state machine driven by transitions.
type ruleState uint8

const (
	ruleInitial  ruleState = iota // nothing consumed yet
	ruleLTR                       // inside an LTR label; not at a valid end point
	ruleLTRFinal                  // inside an LTR label; ending here satisfies [2.6]
	ruleRTL                       // inside an RTL label; not at a valid end point
	ruleRTLFinal                  // inside an RTL label; ending here satisfies [2.3]
	ruleInvalid                   // the rule has been violated
)

// ruleTransition is one candidate edge of the state machine: next is entered
// when the bit for the input's Bidi class is set in mask.
type ruleTransition struct {
	next ruleState
	mask uint16 // bitmask over bidi classes, 1 << bidi.Class
}
|  | ||||
// transitions is the Bidi Rule state machine. For each state the two
// candidate transitions are tried in order and the first whose mask contains
// the bit of the input's Bidi class is taken; if neither matches, the state
// becomes ruleInvalid (see Transformer.advance).
var transitions = [...][2]ruleTransition{
	// [2.1] The first character must be a character with Bidi property L, R, or
	// AL. If it has the R or AL property, it is an RTL label; if it has the L
	// property, it is an LTR label.
	ruleInitial: {
		{ruleLTRFinal, 1 << bidi.L},
		{ruleRTLFinal, 1<<bidi.R | 1<<bidi.AL},
	},
	ruleRTL: {
		// [2.3] In an RTL label, the end of the label must be a character with
		// Bidi property R, AL, EN, or AN, followed by zero or more characters
		// with Bidi property NSM.
		{ruleRTLFinal, 1<<bidi.R | 1<<bidi.AL | 1<<bidi.EN | 1<<bidi.AN},

		// [2.2] In an RTL label, only characters with the Bidi properties R,
		// AL, AN, EN, ES, CS, ET, ON, BN, or NSM are allowed.
		// We exclude the entries from [2.3]
		{ruleRTL, 1<<bidi.ES | 1<<bidi.CS | 1<<bidi.ET | 1<<bidi.ON | 1<<bidi.BN | 1<<bidi.NSM},
	},
	ruleRTLFinal: {
		// [2.3] In an RTL label, the end of the label must be a character with
		// Bidi property R, AL, EN, or AN, followed by zero or more characters
		// with Bidi property NSM.
		{ruleRTLFinal, 1<<bidi.R | 1<<bidi.AL | 1<<bidi.EN | 1<<bidi.AN | 1<<bidi.NSM},

		// [2.2] In an RTL label, only characters with the Bidi properties R,
		// AL, AN, EN, ES, CS, ET, ON, BN, or NSM are allowed.
		// We exclude the entries from [2.3] and NSM.
		{ruleRTL, 1<<bidi.ES | 1<<bidi.CS | 1<<bidi.ET | 1<<bidi.ON | 1<<bidi.BN},
	},
	ruleLTR: {
		// [2.6] In an LTR label, the end of the label must be a character with
		// Bidi property L or EN, followed by zero or more characters with Bidi
		// property NSM.
		{ruleLTRFinal, 1<<bidi.L | 1<<bidi.EN},

		// [2.5] In an LTR label, only characters with the Bidi properties L,
		// EN, ES, CS, ET, ON, BN, or NSM are allowed.
		// We exclude the entries from [2.6].
		{ruleLTR, 1<<bidi.ES | 1<<bidi.CS | 1<<bidi.ET | 1<<bidi.ON | 1<<bidi.BN | 1<<bidi.NSM},
	},
	ruleLTRFinal: {
		// [2.6] In an LTR label, the end of the label must be a character with
		// Bidi property L or EN, followed by zero or more characters with Bidi
		// property NSM.
		{ruleLTRFinal, 1<<bidi.L | 1<<bidi.EN | 1<<bidi.NSM},

		// [2.5] In an LTR label, only characters with the Bidi properties L,
		// EN, ES, CS, ET, ON, BN, or NSM are allowed.
		// We exclude the entries from [2.6].
		{ruleLTR, 1<<bidi.ES | 1<<bidi.CS | 1<<bidi.ET | 1<<bidi.ON | 1<<bidi.BN},
	},
	ruleInvalid: {
		{ruleInvalid, 0},
		{ruleInvalid, 0},
	},
}
|  | ||||
// [2.4] In an RTL label, if an EN is present, no AN may be present, and
// vice versa. Seeing both bits of this mask in Transformer.seen is therefore
// a rule violation.
const exclusiveRTL = uint16(1<<bidi.EN | 1<<bidi.AN)
|  | ||||
| // From RFC 5893 | ||||
| // An RTL label is a label that contains at least one character of type | ||||
| // R, AL, or AN. | ||||
| // | ||||
| // An LTR label is any label that is not an RTL label. | ||||
|  | ||||
| // Direction reports the direction of the given label as defined by RFC 5893. | ||||
| // The Bidi Rule does not have to be applied to labels of the category | ||||
| // LeftToRight. | ||||
| func Direction(b []byte) bidi.Direction { | ||||
| 	for i := 0; i < len(b); { | ||||
| 		e, sz := bidi.Lookup(b[i:]) | ||||
| 		if sz == 0 { | ||||
| 			i++ | ||||
| 		} | ||||
| 		c := e.Class() | ||||
| 		if c == bidi.R || c == bidi.AL || c == bidi.AN { | ||||
| 			return bidi.RightToLeft | ||||
| 		} | ||||
| 		i += sz | ||||
| 	} | ||||
| 	return bidi.LeftToRight | ||||
| } | ||||
|  | ||||
| // DirectionString reports the direction of the given label as defined by RFC | ||||
| // 5893. The Bidi Rule does not have to be applied to labels of the category | ||||
| // LeftToRight. | ||||
| func DirectionString(s string) bidi.Direction { | ||||
| 	for i := 0; i < len(s); { | ||||
| 		e, sz := bidi.LookupString(s[i:]) | ||||
| 		if sz == 0 { | ||||
| 			i++ | ||||
| 		} | ||||
| 		c := e.Class() | ||||
| 		if c == bidi.R || c == bidi.AL || c == bidi.AN { | ||||
| 			return bidi.RightToLeft | ||||
| 		} | ||||
| 		i += sz | ||||
| 	} | ||||
| 	return bidi.LeftToRight | ||||
| } | ||||
|  | ||||
| // Valid reports whether b conforms to the BiDi rule. | ||||
| func Valid(b []byte) bool { | ||||
| 	var t Transformer | ||||
| 	if n, ok := t.advance(b); !ok || n < len(b) { | ||||
| 		return false | ||||
| 	} | ||||
| 	return t.isFinal() | ||||
| } | ||||
|  | ||||
| // ValidString reports whether s conforms to the BiDi rule. | ||||
| func ValidString(s string) bool { | ||||
| 	var t Transformer | ||||
| 	if n, ok := t.advanceString(s); !ok || n < len(s) { | ||||
| 		return false | ||||
| 	} | ||||
| 	return t.isFinal() | ||||
| } | ||||
|  | ||||
| // New returns a Transformer that verifies that input adheres to the Bidi Rule. | ||||
| func New() *Transformer { | ||||
| 	return &Transformer{} | ||||
| } | ||||
|  | ||||
// Transformer implements transform.Transform.
type Transformer struct {
	state  ruleState // current state of the Bidi Rule state machine
	hasRTL bool      // NOTE(review): appears unused in this file — confirm before relying on it
	seen   uint16    // bitmask of all bidi classes observed so far
}
|  | ||||
| // A rule can only be violated for "Bidi Domain names", meaning if one of the | ||||
| // following categories has been observed. | ||||
| func (t *Transformer) isRTL() bool { | ||||
| 	const isRTL = 1<<bidi.R | 1<<bidi.AL | 1<<bidi.AN | ||||
| 	return t.seen&isRTL != 0 | ||||
| } | ||||
|  | ||||
| func (t *Transformer) isFinal() bool { | ||||
| 	if !t.isRTL() { | ||||
| 		return true | ||||
| 	} | ||||
| 	return t.state == ruleLTRFinal || t.state == ruleRTLFinal || t.state == ruleInitial | ||||
| } | ||||
|  | ||||
| // Reset implements transform.Transformer. | ||||
| func (t *Transformer) Reset() { *t = Transformer{} } | ||||
|  | ||||
| // Transform implements transform.Transformer. This Transformer has state and | ||||
| // needs to be reset between uses. | ||||
| func (t *Transformer) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { | ||||
| 	if len(dst) < len(src) { | ||||
| 		src = src[:len(dst)] | ||||
| 		atEOF = false | ||||
| 		err = transform.ErrShortDst | ||||
| 	} | ||||
| 	n, err1 := t.Span(src, atEOF) | ||||
| 	copy(dst, src[:n]) | ||||
| 	if err == nil || err1 != nil && err1 != transform.ErrShortSrc { | ||||
| 		err = err1 | ||||
| 	} | ||||
| 	return n, n, err | ||||
| } | ||||
|  | ||||
| // Span returns the first n bytes of src that conform to the Bidi rule. | ||||
| func (t *Transformer) Span(src []byte, atEOF bool) (n int, err error) { | ||||
| 	if t.state == ruleInvalid && t.isRTL() { | ||||
| 		return 0, ErrInvalid | ||||
| 	} | ||||
| 	n, ok := t.advance(src) | ||||
| 	switch { | ||||
| 	case !ok: | ||||
| 		err = ErrInvalid | ||||
| 	case n < len(src): | ||||
| 		if !atEOF { | ||||
| 			err = transform.ErrShortSrc | ||||
| 			break | ||||
| 		} | ||||
| 		err = ErrInvalid | ||||
| 	case !t.isFinal(): | ||||
| 		err = ErrInvalid | ||||
| 	} | ||||
| 	return n, err | ||||
| } | ||||
|  | ||||
// Precomputing the ASCII values decreases running time for the ASCII fast path
// by about 30%.
var asciiTable [128]bidi.Properties

// init populates asciiTable with the bidi properties of every ASCII code
// point so that advance/advanceString can avoid bidi.Lookup for single bytes.
func init() {
	for i := range asciiTable {
		p, _ := bidi.LookupRune(rune(i))
		asciiTable[i] = p
	}
}
|  | ||||
// advance runs the Bidi Rule state machine over s. It returns the number of
// bytes consumed and whether s[:n] can still be part of a valid label. It
// stops early on invalid UTF-8, on a [2.4] EN/AN conflict, or — once the
// label is known to be RTL — on any disallowed class.
func (t *Transformer) advance(s []byte) (n int, ok bool) {
	var e bidi.Properties
	var sz int
	for n < len(s) {
		if s[n] < utf8.RuneSelf {
			// ASCII fast path via the precomputed table.
			e, sz = asciiTable[s[n]], 1
		} else {
			e, sz = bidi.Lookup(s[n:])
			if sz <= 1 {
				if sz == 1 {
					// We always consider invalid UTF-8 to be invalid, even if
					// the string has not yet been determined to be RTL.
					// TODO: is this correct?
					return n, false
				}
				return n, true // incomplete UTF-8 encoding
			}
		}
		// TODO: using CompactClass would result in noticeable speedup.
		// See unicode/bidi/prop.go:Properties.CompactClass.
		c := uint16(1 << e.Class())
		t.seen |= c
		if t.seen&exclusiveRTL == exclusiveRTL {
			// [2.4]: EN and AN may not both appear in an RTL label.
			t.state = ruleInvalid
			return n, false
		}
		// Take the first transition whose mask contains this class bit.
		switch tr := transitions[t.state]; {
		case tr[0].mask&c != 0:
			t.state = tr[0].next
		case tr[1].mask&c != 0:
			t.state = tr[1].next
		default:
			t.state = ruleInvalid
			// Only fail outright for RTL labels; an LTR-so-far label may
			// still be acceptable to a caller that never applies the rule.
			if t.isRTL() {
				return n, false
			}
		}
		n += sz
	}
	return n, true
}
|  | ||||
// advanceString is the string variant of advance; see advance for the
// semantics of the return values and the early-exit conditions.
func (t *Transformer) advanceString(s string) (n int, ok bool) {
	var e bidi.Properties
	var sz int
	for n < len(s) {
		if s[n] < utf8.RuneSelf {
			// ASCII fast path via the precomputed table.
			e, sz = asciiTable[s[n]], 1
		} else {
			e, sz = bidi.LookupString(s[n:])
			if sz <= 1 {
				if sz == 1 {
					return n, false // invalid UTF-8
				}
				return n, true // incomplete UTF-8 encoding
			}
		}
		// TODO: using CompactClass results in noticeable speedup.
		// See unicode/bidi/prop.go:Properties.CompactClass.
		c := uint16(1 << e.Class())
		t.seen |= c
		if t.seen&exclusiveRTL == exclusiveRTL {
			// [2.4]: EN and AN may not both appear in an RTL label.
			t.state = ruleInvalid
			return n, false
		}
		// Take the first transition whose mask contains this class bit.
		switch tr := transitions[t.state]; {
		case tr[0].mask&c != 0:
			t.state = tr[0].next
		case tr[1].mask&c != 0:
			t.state = tr[1].next
		default:
			t.state = ruleInvalid
			if t.isRTL() {
				return n, false
			}
		}
		n += sz
	}
	return n, true
}
							
								
								
									
										705
									
								
								vendor/golang.org/x/text/transform/transform.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										705
									
								
								vendor/golang.org/x/text/transform/transform.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,705 @@ | ||||
| // Copyright 2013 The Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| // Package transform provides reader and writer wrappers that transform the | ||||
| // bytes passing through as well as various transformations. Example | ||||
| // transformations provided by other packages include normalization and | ||||
| // conversion between character sets. | ||||
| package transform // import "golang.org/x/text/transform" | ||||
|  | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"errors" | ||||
| 	"io" | ||||
| 	"unicode/utf8" | ||||
| ) | ||||
|  | ||||
var (
	// ErrShortDst means that the destination buffer was too short to
	// receive all of the transformed bytes.
	ErrShortDst = errors.New("transform: short destination buffer")

	// ErrShortSrc means that the source buffer has insufficient data to
	// complete the transformation.
	ErrShortSrc = errors.New("transform: short source buffer")

	// ErrEndOfSpan means that the input and output (the transformed input)
	// are not identical. It is returned by SpanningTransformer.Span.
	ErrEndOfSpan = errors.New("transform: input and output are not identical")

	// errInconsistentByteCount means that Transform returned success (nil
	// error) but also returned nSrc inconsistent with the src argument.
	errInconsistentByteCount = errors.New("transform: inconsistent byte count returned")

	// errShortInternal means that an internal buffer is not large enough
	// to make progress and the Transform operation must be aborted.
	errShortInternal = errors.New("transform: short internal buffer")
)
|  | ||||
// Transformer transforms bytes. Implementations may be stateful; Reset must
// restore a Transformer to its initial state before reuse.
type Transformer interface {
	// Transform writes to dst the transformed bytes read from src, and
	// returns the number of dst bytes written and src bytes read. The
	// atEOF argument tells whether src represents the last bytes of the
	// input.
	//
	// Callers should always process the nDst bytes produced and account
	// for the nSrc bytes consumed before considering the error err.
	//
	// A nil error means that all of the transformed bytes (whether freshly
	// transformed from src or left over from previous Transform calls)
	// were written to dst. A nil error can be returned regardless of
	// whether atEOF is true. If err is nil then nSrc must equal len(src);
	// the converse is not necessarily true.
	//
	// ErrShortDst means that dst was too short to receive all of the
	// transformed bytes. ErrShortSrc means that src had insufficient data
	// to complete the transformation. If both conditions apply, then
	// either error may be returned. Other than the error conditions listed
	// here, implementations are free to report other errors that arise.
	Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error)

	// Reset resets the state and allows a Transformer to be reused.
	Reset()
}
|  | ||||
// SpanningTransformer extends the Transformer interface with a Span method
// that determines how much of the input already conforms to the Transformer.
type SpanningTransformer interface {
	Transformer

	// Span returns a position in src such that transforming src[:n] results in
	// identical output src[:n] for these bytes. It does not necessarily return
	// the largest such n. The atEOF argument tells whether src represents the
	// last bytes of the input.
	//
	// Callers should always account for the n bytes consumed before
	// considering the error err.
	//
	// A nil error means that all input bytes are known to be identical to the
	// output produced by the Transformer. A nil error can be returned
	// regardless of whether atEOF is true. If err is nil, then n must
	// equal len(src); the converse is not necessarily true.
	//
	// ErrEndOfSpan means that the Transformer output may differ from the
	// input after n bytes. Note that n may be len(src), meaning that the output
	// would contain additional bytes after otherwise identical output.
	// ErrShortSrc means that src had insufficient data to determine whether the
	// remaining bytes would change. Other than the error conditions listed
	// here, implementations are free to report other errors that arise.
	//
	// Calling Span can modify the Transformer state as a side effect. In
	// effect, it does the transformation just as calling Transform would, only
	// without copying to a destination buffer and only up to a point it can
	// determine the input and output bytes are the same. This is obviously more
	// limited than calling Transform, but can be more efficient in terms of
	// copying and allocating buffers. Calls to Span and Transform may be
	// interleaved.
	Span(src []byte, atEOF bool) (n int, err error)
}
|  | ||||
// NopResetter can be embedded by implementations of Transformer to add a nop
// Reset method, for stateless Transformers that need no reinitialization.
type NopResetter struct{}

// Reset implements the Reset method of the Transformer interface.
func (NopResetter) Reset() {}
|  | ||||
// Reader wraps another io.Reader by transforming the bytes read.
type Reader struct {
	r   io.Reader   // underlying source of untransformed bytes
	t   Transformer // transformation applied to bytes read from r
	err error       // error from r.r.Read or t, surfaced once the transform completes

	// dst[dst0:dst1] contains bytes that have been transformed by t but
	// not yet copied out via Read.
	dst        []byte
	dst0, dst1 int

	// src[src0:src1] contains bytes that have been read from r but not
	// yet transformed through t.
	src        []byte
	src0, src1 int

	// transformComplete is whether the transformation is complete,
	// regardless of whether or not it was successful.
	transformComplete bool
}
|  | ||||
// defaultBufSize is the size of the internal buffers used by Reader, Writer,
// and Chain.
const defaultBufSize = 4096
|  | ||||
| // NewReader returns a new Reader that wraps r by transforming the bytes read | ||||
| // via t. It calls Reset on t. | ||||
| func NewReader(r io.Reader, t Transformer) *Reader { | ||||
| 	t.Reset() | ||||
| 	return &Reader{ | ||||
| 		r:   r, | ||||
| 		t:   t, | ||||
| 		dst: make([]byte, defaultBufSize), | ||||
| 		src: make([]byte, defaultBufSize), | ||||
| 	} | ||||
| } | ||||
|  | ||||
// Read implements the io.Reader interface. It alternates between draining
// transformed bytes from dst, running the transformer over buffered source
// bytes, and refilling src from the underlying reader, until either output
// is produced or the transformation completes.
func (r *Reader) Read(p []byte) (int, error) {
	n, err := 0, error(nil)
	for {
		// Copy out any transformed bytes and return the final error if we are done.
		if r.dst0 != r.dst1 {
			n = copy(p, r.dst[r.dst0:r.dst1])
			r.dst0 += n
			if r.dst0 == r.dst1 && r.transformComplete {
				return n, r.err
			}
			return n, nil
		} else if r.transformComplete {
			return 0, r.err
		}

		// Try to transform some source bytes, or to flush the transformer if we
		// are out of source bytes. We do this even if r.r.Read returned an error.
		// As the io.Reader documentation says, "process the n > 0 bytes returned
		// before considering the error".
		if r.src0 != r.src1 || r.err != nil {
			r.dst0 = 0
			r.dst1, n, err = r.t.Transform(r.dst, r.src[r.src0:r.src1], r.err == io.EOF)
			r.src0 += n

			switch {
			case err == nil:
				if r.src0 != r.src1 {
					// A nil error with unconsumed source violates the
					// Transformer contract.
					r.err = errInconsistentByteCount
				}
				// The Transform call was successful; we are complete if we
				// cannot read more bytes into src.
				r.transformComplete = r.err != nil
				continue
			case err == ErrShortDst && (r.dst1 != 0 || n != 0):
				// Make room in dst by copying out, and try again.
				continue
			case err == ErrShortSrc && r.src1-r.src0 != len(r.src) && r.err == nil:
				// Read more bytes into src via the code below, and try again.
			default:
				r.transformComplete = true
				// The reader error (r.err) takes precedence over the
				// transformer error (err) unless r.err is nil or io.EOF.
				if r.err == nil || r.err == io.EOF {
					r.err = err
				}
				continue
			}
		}

		// Move any untransformed source bytes to the start of the buffer
		// and read more bytes.
		if r.src0 != 0 {
			r.src0, r.src1 = 0, copy(r.src, r.src[r.src0:r.src1])
		}
		n, r.err = r.r.Read(r.src[r.src1:])
		r.src1 += n
	}
}
|  | ||||
| // TODO: implement ReadByte (and ReadRune??). | ||||
|  | ||||
// Writer wraps another io.Writer by transforming the bytes read.
// The user needs to call Close to flush unwritten bytes that may
// be buffered.
type Writer struct {
	w   io.Writer   // underlying destination for transformed bytes
	t   Transformer // transformation applied to bytes before writing to w
	dst []byte      // scratch output buffer for t.Transform

	// src[:n] contains bytes that have not yet passed through t.
	src []byte
	n   int
}
|  | ||||
| // NewWriter returns a new Writer that wraps w by transforming the bytes written | ||||
| // via t. It calls Reset on t. | ||||
| func NewWriter(w io.Writer, t Transformer) *Writer { | ||||
| 	t.Reset() | ||||
| 	return &Writer{ | ||||
| 		w:   w, | ||||
| 		t:   t, | ||||
| 		dst: make([]byte, defaultBufSize), | ||||
| 		src: make([]byte, defaultBufSize), | ||||
| 	} | ||||
| } | ||||
|  | ||||
// Write implements the io.Writer interface. If there are not enough
// bytes available to complete a Transform, the bytes will be buffered
// for the next write. Call Close to convert the remaining bytes.
//
// The returned n counts bytes of data consumed, including bytes copied into
// the internal w.src buffer for a later call.
func (w *Writer) Write(data []byte) (n int, err error) {
	src := data
	if w.n > 0 {
		// Append bytes from data to the last remainder.
		// TODO: limit the amount copied on first try.
		n = copy(w.src[w.n:], data)
		w.n += n
		src = w.src[:w.n]
	}
	for {
		nDst, nSrc, err := w.t.Transform(w.dst, src, false)
		if _, werr := w.w.Write(w.dst[:nDst]); werr != nil {
			return n, werr
		}
		src = src[nSrc:]
		if w.n == 0 {
			n += nSrc
		} else if len(src) <= n {
			// Enough bytes from w.src have been consumed. We make src point
			// to data instead to reduce the copying.
			w.n = 0
			n -= len(src)
			src = data[n:]
			if n < len(data) && (err == nil || err == ErrShortSrc) {
				continue
			}
		}
		switch err {
		case ErrShortDst:
			// This error is okay as long as we are making progress.
			if nDst > 0 || nSrc > 0 {
				continue
			}
		case ErrShortSrc:
			if len(src) < len(w.src) {
				// Buffer the unconsumed tail for the next Write or Close.
				m := copy(w.src, src)
				// If w.n > 0, bytes from data were already copied to w.src and n
				// was already set to the number of bytes consumed.
				if w.n == 0 {
					n += m
				}
				w.n = m
				err = nil
			} else if nDst > 0 || nSrc > 0 {
				// Not enough buffer to store the remainder. Keep processing as
				// long as there is progress. Without this case, transforms that
				// require a lookahead larger than the buffer may result in an
				// error. This is not something one may expect to be common in
				// practice, but it may occur when buffers are set to small
				// sizes during testing.
				continue
			}
		case nil:
			if w.n > 0 {
				// A nil error with buffered bytes remaining violates the
				// Transformer contract.
				err = errInconsistentByteCount
			}
		}
		return n, err
	}
}
|  | ||||
| // Close implements the io.Closer interface. | ||||
| func (w *Writer) Close() error { | ||||
| 	src := w.src[:w.n] | ||||
| 	for { | ||||
| 		nDst, nSrc, err := w.t.Transform(w.dst, src, true) | ||||
| 		if _, werr := w.w.Write(w.dst[:nDst]); werr != nil { | ||||
| 			return werr | ||||
| 		} | ||||
| 		if err != ErrShortDst { | ||||
| 			return err | ||||
| 		} | ||||
| 		src = src[nSrc:] | ||||
| 	} | ||||
| } | ||||
|  | ||||
| type nop struct{ NopResetter } | ||||
|  | ||||
| func (nop) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { | ||||
| 	n := copy(dst, src) | ||||
| 	if n < len(src) { | ||||
| 		err = ErrShortDst | ||||
| 	} | ||||
| 	return n, n, err | ||||
| } | ||||
|  | ||||
| func (nop) Span(src []byte, atEOF bool) (n int, err error) { | ||||
| 	return len(src), nil | ||||
| } | ||||
|  | ||||
// discard is a Transformer that consumes all input and produces no output.
type discard struct{ NopResetter }

// Transform consumes all of src and writes nothing.
func (discard) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
	return 0, len(src), nil
}
|  | ||||
var (
	// Discard is a Transformer for which all Transform calls succeed
	// by consuming all bytes and writing nothing.
	Discard Transformer = discard{}

	// Nop is a SpanningTransformer that copies src to dst.
	Nop SpanningTransformer = nop{}
)
|  | ||||
// chain is a sequence of links. A chain with N Transformers has N+1 links and
// N+1 buffers. Of those N+1 buffers, the first and last are the src and dst
// buffers given to chain.Transform and the middle N-1 buffers are intermediate
// buffers owned by the chain. The i'th link transforms bytes from the i'th
// buffer chain.link[i].b at read offset chain.link[i].p to the i+1'th buffer
// chain.link[i+1].b at write offset chain.link[i+1].n, for i in [0, N).
type chain struct {
	link []link
	err  error
	// errStart is the index at which the error occurred plus 1. Processing
	// resumes at this level at the next call to Transform. As long as
	// errStart > 0, chain will not consume any more source bytes.
	errStart int
}
|  | ||||
| func (c *chain) fatalError(errIndex int, err error) { | ||||
| 	if i := errIndex + 1; i > c.errStart { | ||||
| 		c.errStart = i | ||||
| 		c.err = err | ||||
| 	} | ||||
| } | ||||
|  | ||||
// link is one stage in a chain: a Transformer together with the buffer
// holding its pending input.
type link struct {
	t Transformer
	// b[p:n] holds the bytes to be transformed by t.
	b []byte
	p int
	n int
}

// src returns the bytes buffered for this link that are still to be
// transformed.
func (l *link) src() []byte {
	return l.b[l.p:l.n]
}

// dst returns the unused tail of this link's buffer, which serves as output
// space for the previous link's Transformer.
func (l *link) dst() []byte {
	return l.b[l.n:]
}
|  | ||||
| // Chain returns a Transformer that applies t in sequence. | ||||
| func Chain(t ...Transformer) Transformer { | ||||
| 	if len(t) == 0 { | ||||
| 		return nop{} | ||||
| 	} | ||||
| 	c := &chain{link: make([]link, len(t)+1)} | ||||
| 	for i, tt := range t { | ||||
| 		c.link[i].t = tt | ||||
| 	} | ||||
| 	// Allocate intermediate buffers. | ||||
| 	b := make([][defaultBufSize]byte, len(t)-1) | ||||
| 	for i := range b { | ||||
| 		c.link[i+1].b = b[i][:] | ||||
| 	} | ||||
| 	return c | ||||
| } | ||||
|  | ||||
| // Reset resets the state of Chain. It calls Reset on all the Transformers. | ||||
| func (c *chain) Reset() { | ||||
| 	for i, l := range c.link { | ||||
| 		if l.t != nil { | ||||
| 			l.t.Reset() | ||||
| 		} | ||||
| 		c.link[i].p, c.link[i].n = 0, 0 | ||||
| 	} | ||||
| } | ||||
|  | ||||
// TODO: make chain use Span (is going to be fun to implement!)

// Transform applies the transformers of c in sequence.
func (c *chain) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
	// Set up src and dst in the chain.
	srcL := &c.link[0]
	dstL := &c.link[len(c.link)-1]
	srcL.b, srcL.p, srcL.n = src, 0, len(src)
	dstL.b, dstL.n = dst, 0
	var lastFull, needProgress bool // for detecting progress

	// i is the index of the next Transformer to apply, for i in [low, high].
	// low is the lowest index for which c.link[low] may still produce bytes.
	// high is the highest index for which c.link[high] has a Transformer.
	// The error returned by Transform determines whether to increase or
	// decrease i. We try to completely fill a buffer before converting it.
	for low, i, high := c.errStart, c.errStart, len(c.link)-2; low <= i && i <= high; {
		in, out := &c.link[i], &c.link[i+1]
		nDst, nSrc, err0 := in.t.Transform(out.dst(), in.src(), atEOF && low == i)
		out.n += nDst
		in.p += nSrc
		// A fully consumed intermediate buffer (i > 0) can be rewound so
		// subsequent output is written from its start again.
		if i > 0 && in.p == in.n {
			in.p, in.n = 0, 0
		}
		needProgress, lastFull = lastFull, false
		switch err0 {
		case ErrShortDst:
			// Process the destination buffer next. Return if we are already
			// at the high index.
			if i == high {
				return dstL.n, srcL.p, ErrShortDst
			}
			if out.n != 0 {
				i++
				// If the Transformer at the next index is not able to process any
				// source bytes there is nothing that can be done to make progress
				// and the bytes will remain unprocessed. lastFull is used to
				// detect this and break out of the loop with a fatal error.
				lastFull = true
				continue
			}
			// The destination buffer was too small, but is completely empty.
			// Return a fatal error as this transformation can never complete.
			c.fatalError(i, errShortInternal)
		case ErrShortSrc:
			if i == 0 {
				// Save ErrShortSrc in err. All other errors take precedence.
				err = ErrShortSrc
				break
			}
			// Source bytes were depleted before filling up the destination buffer.
			// Verify we made some progress, move the remaining bytes to the errStart
			// and try to get more source bytes.
			if needProgress && nSrc == 0 || in.n-in.p == len(in.b) {
				// There were not enough source bytes to proceed while the source
				// buffer cannot hold any more bytes. Return a fatal error as this
				// transformation can never complete.
				c.fatalError(i, errShortInternal)
				break
			}
			// in.b is an internal buffer and we can make progress.
			in.p, in.n = 0, copy(in.b, in.src())
			fallthrough
		case nil:
			// if i == low, we have depleted the bytes at index i or any lower levels.
			// In that case we increase low and i. In all other cases we decrease i to
			// fetch more bytes before proceeding to the next index.
			if i > low {
				i--
				continue
			}
		default:
			c.fatalError(i, err0)
		}
		// Exhausted level low or fatal error: increase low and continue
		// to process the bytes accepted so far.
		i++
		low = i
	}

	// If c.errStart > 0, this means we found a fatal error.  We will clear
	// all upstream buffers. At this point, no more progress can be made
	// downstream, as Transform would have bailed while handling ErrShortDst.
	if c.errStart > 0 {
		for i := 1; i < c.errStart; i++ {
			c.link[i].p, c.link[i].n = 0, 0
		}
		err, c.errStart, c.err = c.err, 0, nil
	}
	return dstL.n, srcL.p, err
}
|  | ||||
// RemoveFunc returns a Transformer that removes from the input all runes r
// for which f(r) is true.
//
// Deprecated: use runes.Remove instead.
func RemoveFunc(f func(r rune) bool) Transformer {
	return removeF(f)
}

// removeF adapts a rune predicate into a Transformer that drops the runes
// for which the predicate reports true.
type removeF func(r rune) bool

// Reset implements the Transformer interface; removeF carries no state.
func (removeF) Reset() {}
|  | ||||
// Transform implements the Transformer interface. It copies to dst each rune
// r of src for which t(r) is false and drops the rest. Kept invalid UTF-8
// bytes are replaced by U+FFFD so that a sequence of invalid bytes cannot
// combine into new, valid UTF-8.
func (t removeF) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
	for r, sz := rune(0), 0; len(src) > 0; src = src[sz:] {

		// Fast path for ASCII; otherwise decode a full rune.
		if r = rune(src[0]); r < utf8.RuneSelf {
			sz = 1
		} else {
			r, sz = utf8.DecodeRune(src)

			if sz == 1 {
				// Invalid rune.
				if !atEOF && !utf8.FullRune(src) {
					err = ErrShortSrc
					break
				}
				// We replace illegal bytes with RuneError. Not doing so might
				// otherwise turn a sequence of invalid UTF-8 into valid UTF-8.
				// The resulting byte sequence may subsequently contain runes
				// for which t(r) is true that were passed unnoticed.
				if !t(r) {
					if nDst+3 > len(dst) {
						err = ErrShortDst
						break
					}
					nDst += copy(dst[nDst:], "\uFFFD")
				}
				nSrc++
				continue
			}
		}

		if !t(r) {
			if nDst+sz > len(dst) {
				err = ErrShortDst
				break
			}
			nDst += copy(dst[nDst:], src[:sz])
		}
		nSrc += sz
	}
	return
}
|  | ||||
// grow returns a new []byte that is longer than b, and copies the first n
// bytes of b to the start of the new slice. Small slices jump straight to 64
// bytes, medium ones double, and large ones grow by 50% to limit waste.
func grow(b []byte, n int) []byte {
	var m int
	switch {
	case len(b) <= 32:
		m = 64
	case len(b) <= 256:
		m = 2 * len(b)
	default:
		m = len(b) + len(b)>>1
	}
	next := make([]byte, m)
	copy(next, b[:n])
	return next
}
|  | ||||
// initialBufSize is the starting size of the destination and source scratch
// buffers used by String; the buffers are grown as needed via grow.
const initialBufSize = 128
|  | ||||
// String returns a string with the result of converting s[:n] using t, where
// n <= len(s). If err == nil, n will be len(s). It calls Reset on t.
func String(t Transformer, s string) (result string, n int, err error) {
	t.Reset()
	if s == "" {
		// Fast path for the common case for empty input. Results in about a
		// 86% reduction of running time for BenchmarkStringLowerEmpty.
		if _, _, err := t.Transform(nil, nil, true); err == nil {
			return "", 0, nil
		}
	}

	// Allocate only once. Note that both dst and src escape when passed to
	// Transform.
	buf := [2 * initialBufSize]byte{}
	dst := buf[:initialBufSize:initialBufSize]
	src := buf[initialBufSize : 2*initialBufSize]

	// The input string s is transformed in multiple chunks (starting with a
	// chunk size of initialBufSize). nDst and nSrc are per-chunk (or
	// per-Transform-call) indexes, pDst and pSrc are overall indexes.
	nDst, nSrc := 0, 0
	pDst, pSrc := 0, 0

	// pPrefix is the length of a common prefix: the first pPrefix bytes of the
	// result will equal the first pPrefix bytes of s. It is not guaranteed to
	// be the largest such value, but if pPrefix, len(result) and len(s) are
	// all equal after the final transform (i.e. calling Transform with atEOF
	// being true returned nil error) then we don't need to allocate a new
	// result string.
	pPrefix := 0
	for {
		// Invariant: pDst == pPrefix && pSrc == pPrefix.

		n := copy(src, s[pSrc:])
		nDst, nSrc, err = t.Transform(dst, src[:n], pSrc+n == len(s))
		pDst += nDst
		pSrc += nSrc

		// TODO:  let transformers implement an optional Spanner interface, akin
		// to norm's QuickSpan. This would even allow us to avoid any allocation.
		if !bytes.Equal(dst[:nDst], src[:nSrc]) {
			break
		}
		pPrefix = pSrc
		if err == ErrShortDst {
			// A buffer can only be short if a transformer modifies its input.
			break
		} else if err == ErrShortSrc {
			if nSrc == 0 {
				// No progress was made.
				break
			}
			// Equal so far and !atEOF, so continue checking.
		} else if err != nil || pPrefix == len(s) {
			return string(s[:pPrefix]), pPrefix, err
		}
	}
	// Post-condition: pDst == pPrefix + nDst && pSrc == pPrefix + nSrc.

	// We have transformed the first pSrc bytes of the input s to become pDst
	// transformed bytes. Those transformed bytes are discontiguous: the first
	// pPrefix of them equal s[:pPrefix] and the last nDst of them equal
	// dst[:nDst]. We copy them around, into a new dst buffer if necessary, so
	// that they become one contiguous slice: dst[:pDst].
	if pPrefix != 0 {
		newDst := dst
		if pDst > len(newDst) {
			// len(s)+nDst-nSrc estimates the final length; the loop below
			// grows dst further if the estimate falls short.
			newDst = make([]byte, len(s)+nDst-nSrc)
		}
		copy(newDst[pPrefix:pDst], dst[:nDst])
		copy(newDst[:pPrefix], s[:pPrefix])
		dst = newDst
	}

	// Prevent duplicate Transform calls with atEOF being true at the end of
	// the input. Also return if we have an unrecoverable error.
	if (err == nil && pSrc == len(s)) ||
		(err != nil && err != ErrShortDst && err != ErrShortSrc) {
		return string(dst[:pDst]), pSrc, err
	}

	// Transform the remaining input, growing dst and src buffers as necessary.
	for {
		n := copy(src, s[pSrc:])
		nDst, nSrc, err := t.Transform(dst[pDst:], src[:n], pSrc+n == len(s))
		pDst += nDst
		pSrc += nSrc

		// If we got ErrShortDst or ErrShortSrc, do not grow as long as we can
		// make progress. This may avoid excessive allocations.
		if err == ErrShortDst {
			if nDst == 0 {
				dst = grow(dst, pDst)
			}
		} else if err == ErrShortSrc {
			if nSrc == 0 {
				src = grow(src, 0)
			}
		} else if err != nil || pSrc == len(s) {
			return string(dst[:pDst]), pSrc, err
		}
	}
}
|  | ||||
| // Bytes returns a new byte slice with the result of converting b[:n] using t, | ||||
| // where n <= len(b). If err == nil, n will be len(b). It calls Reset on t. | ||||
| func Bytes(t Transformer, b []byte) (result []byte, n int, err error) { | ||||
| 	return doAppend(t, 0, make([]byte, len(b)), b) | ||||
| } | ||||
|  | ||||
| // Append appends the result of converting src[:n] using t to dst, where | ||||
| // n <= len(src), If err == nil, n will be len(src). It calls Reset on t. | ||||
| func Append(t Transformer, dst, src []byte) (result []byte, n int, err error) { | ||||
| 	if len(dst) == cap(dst) { | ||||
| 		n := len(src) + len(dst) // It is okay for this to be 0. | ||||
| 		b := make([]byte, n) | ||||
| 		dst = b[:copy(b, dst)] | ||||
| 	} | ||||
| 	return doAppend(t, len(dst), dst[:cap(dst)], src) | ||||
| } | ||||
|  | ||||
| func doAppend(t Transformer, pDst int, dst, src []byte) (result []byte, n int, err error) { | ||||
| 	t.Reset() | ||||
| 	pSrc := 0 | ||||
| 	for { | ||||
| 		nDst, nSrc, err := t.Transform(dst[pDst:], src[pSrc:], true) | ||||
| 		pDst += nDst | ||||
| 		pSrc += nSrc | ||||
| 		if err != ErrShortDst { | ||||
| 			return dst[:pDst], pSrc, err | ||||
| 		} | ||||
|  | ||||
| 		// Grow the destination buffer, but do not grow as long as we can make | ||||
| 		// progress. This may avoid excessive allocations. | ||||
| 		if nDst == 0 { | ||||
| 			dst = grow(dst, pDst) | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
							
								
								
									
										198
									
								
								vendor/golang.org/x/text/unicode/bidi/bidi.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										198
									
								
								vendor/golang.org/x/text/unicode/bidi/bidi.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,198 @@ | ||||
| // Copyright 2015 The Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| //go:generate go run gen.go gen_trieval.go gen_ranges.go | ||||
|  | ||||
| // Package bidi contains functionality for bidirectional text support. | ||||
| // | ||||
| // See http://www.unicode.org/reports/tr9. | ||||
| // | ||||
| // NOTE: UNDER CONSTRUCTION. This API may change in backwards incompatible ways | ||||
| // and without notice. | ||||
| package bidi // import "golang.org/x/text/unicode/bidi" | ||||
|  | ||||
| // TODO: | ||||
| // The following functionality would not be hard to implement, but hinges on | ||||
| // the definition of a Segmenter interface. For now this is up to the user. | ||||
| // - Iterate over paragraphs | ||||
| // - Segmenter to iterate over runs directly from a given text. | ||||
| // Also: | ||||
| // - Transformer for reordering? | ||||
| // - Transformer (validator, really) for Bidi Rule. | ||||
|  | ||||
| // This API tries to avoid dealing with embedding levels for now. Under the hood | ||||
| // these will be computed, but the question is to which extent the user should | ||||
| // know they exist. We should at some point allow the user to specify an | ||||
| // embedding hierarchy, though. | ||||
|  | ||||
// A Direction indicates the overall flow of text.
type Direction int

const (
	// LeftToRight indicates the text contains no right-to-left characters and
	// that either there are some left-to-right characters or the option
	// DefaultDirection(LeftToRight) was passed.
	LeftToRight Direction = iota

	// RightToLeft indicates the text contains no left-to-right characters and
	// that either there are some right-to-left characters or the option
	// DefaultDirection(RightToLeft) was passed.
	RightToLeft

	// Mixed indicates text contains both left-to-right and right-to-left
	// characters.
	Mixed

	// Neutral means that text contains no left-to-right or right-to-left
	// characters and that no default direction has been set.
	Neutral
)
|  | ||||
// options holds the configuration produced by applying Options. It is
// currently empty.
type options struct{}

// An Option is an option for Bidi processing.
type Option func(*options)

// ICU allows the user to define embedding levels. This may be used, for example,
// to use hierarchical structure of markup languages to define embeddings.
// The following option may be a way to expose this functionality in this API.
// // LevelFunc sets a function that associates nesting levels with the given text.
// // The levels function will be called with monotonically increasing values for p.
// func LevelFunc(levels func(p int) int) Option {
// 	panic("unimplemented")
// }

// DefaultDirection sets the default direction for a Paragraph. The direction is
// overridden if the text contains directional characters.
//
// NOTE: this is an unimplemented stub; calling it panics.
func DefaultDirection(d Direction) Option {
	panic("unimplemented")
}
|  | ||||
// A Paragraph holds a single Paragraph for Bidi processing.
//
// All methods are currently unimplemented stubs that panic when called.
type Paragraph struct {
	// buffers
}

// SetBytes configures p for the given paragraph text. It replaces text
// previously set by SetBytes or SetString. If b contains a paragraph separator
// it will only process the first paragraph and report the number of bytes
// consumed from b including this separator. Error may be non-nil if options are
// given.
func (p *Paragraph) SetBytes(b []byte, opts ...Option) (n int, err error) {
	panic("unimplemented")
}

// SetString configures p for the given paragraph text. It replaces text
// previously set by SetBytes or SetString. If s contains a paragraph separator
// it will only process the first paragraph and report the number of bytes
// consumed from s including this separator. Error may be non-nil if options are
// given.
func (p *Paragraph) SetString(s string, opts ...Option) (n int, err error) {
	panic("unimplemented")
}

// IsLeftToRight reports whether the principal direction of rendering for this
// paragraph is left-to-right. If this returns false, the principal direction
// of rendering is right-to-left.
func (p *Paragraph) IsLeftToRight() bool {
	panic("unimplemented")
}

// Direction returns the direction of the text of this paragraph.
//
// The direction may be LeftToRight, RightToLeft, Mixed, or Neutral.
func (p *Paragraph) Direction() Direction {
	panic("unimplemented")
}

// RunAt reports the Run at the given position of the input text.
//
// This method can be used for computing line breaks on paragraphs.
func (p *Paragraph) RunAt(pos int) Run {
	panic("unimplemented")
}

// Order computes the visual ordering of all the runs in a Paragraph.
func (p *Paragraph) Order() (Ordering, error) {
	panic("unimplemented")
}

// Line computes the visual ordering of runs for a single line starting and
// ending at the given positions in the original text.
func (p *Paragraph) Line(start, end int) (Ordering, error) {
	panic("unimplemented")
}
|  | ||||
// An Ordering holds the computed visual order of runs of a Paragraph. Calling
// SetBytes or SetString on the originating Paragraph invalidates an Ordering.
// The methods of an Ordering should only be called by one goroutine at a time.
//
// All methods are currently unimplemented stubs that panic when called.
type Ordering struct{}

// Direction reports the directionality of the runs.
//
// The direction may be LeftToRight, RightToLeft, Mixed, or Neutral.
func (o *Ordering) Direction() Direction {
	panic("unimplemented")
}

// NumRuns returns the number of runs.
func (o *Ordering) NumRuns() int {
	panic("unimplemented")
}

// Run returns the ith run within the ordering.
func (o *Ordering) Run(i int) Run {
	panic("unimplemented")
}
|  | ||||
// TODO: perhaps with options.
// // Reorder creates a reader that reads the runes in visual order per character.
// // Modifiers remain after the runes they modify.
// func (l *Runs) Reorder() io.Reader {
// 	panic("unimplemented")
// }

// A Run is a continuous sequence of characters of a single direction.
//
// All methods are currently unimplemented stubs that panic when called.
type Run struct {
}

// String returns the text of the run in its original order.
func (r *Run) String() string {
	panic("unimplemented")
}

// Bytes returns the text of the run in its original order.
func (r *Run) Bytes() []byte {
	panic("unimplemented")
}

// TODO: methods for
// - Display order
// - headers and footers
// - bracket replacement.

// Direction reports the direction of the run.
func (r *Run) Direction() Direction {
	panic("unimplemented")
}

// Pos reports the position of the Run within the text passed to SetBytes or
// SetString of the originating Paragraph value.
func (r *Run) Pos() (start, end int) {
	panic("unimplemented")
}

// AppendReverse reverses the order of characters of in, appends them to out,
// and returns the result. Modifiers will still follow the runes they modify.
// Brackets are replaced with their counterparts.
//
// NOTE: this is an unimplemented stub; calling it panics.
func AppendReverse(out, in []byte) []byte {
	panic("unimplemented")
}

// ReverseString reverses the order of characters in s and returns a new string.
// Modifiers will still follow the runes they modify. Brackets are replaced with
// their counterparts.
//
// NOTE: this is an unimplemented stub; calling it panics.
func ReverseString(s string) string {
	panic("unimplemented")
}
							
								
								
									
										335
									
								
								vendor/golang.org/x/text/unicode/bidi/bracket.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										335
									
								
								vendor/golang.org/x/text/unicode/bidi/bracket.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,335 @@ | ||||
| // Copyright 2015 The Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| package bidi | ||||
|  | ||||
| import ( | ||||
| 	"container/list" | ||||
| 	"fmt" | ||||
| 	"sort" | ||||
| ) | ||||
|  | ||||
| // This file contains a port of the reference implementation of the | ||||
| // Bidi Parentheses Algorithm: | ||||
| // http://www.unicode.org/Public/PROGRAMS/BidiReferenceJava/BidiPBAReference.java | ||||
| // | ||||
| // The implementation in this file covers definitions BD14-BD16 and rule N0 | ||||
| // of UAX#9. | ||||
| // | ||||
| // Some preprocessing is done for each rune before data is passed to this | ||||
| // algorithm: | ||||
| //  - opening and closing brackets are identified | ||||
| //  - a bracket pair type, like '(' and ')' is assigned a unique identifier that | ||||
//    is identical for the opening and closing bracket. It is left to the
//    caller to do these mappings.
| //  - The BPA algorithm requires that bracket characters that are canonical | ||||
| //    equivalents of each other be able to be substituted for each other. | ||||
| //    It is the responsibility of the caller to do this canonicalization. | ||||
| // | ||||
| // In implementing BD16, this implementation departs slightly from the "logical" | ||||
| // algorithm defined in UAX#9. In particular, the stack referenced there | ||||
| // supports operations that go beyond a "basic" stack. An equivalent | ||||
| // implementation based on a linked list is used here. | ||||
|  | ||||
// Bidi_Paired_Bracket_Type
// BD14. An opening paired bracket is a character whose
// Bidi_Paired_Bracket_Type property value is Open.
//
// BD15. A closing paired bracket is a character whose
// Bidi_Paired_Bracket_Type property value is Close.
type bracketType byte

const (
	// bpNone marks characters that are not paired brackets.
	bpNone bracketType = iota
	// bpOpen marks opening paired brackets (BD14).
	bpOpen
	// bpClose marks closing paired brackets (BD15).
	bpClose
)
|  | ||||
// bracketPair records the index positions of the opening and closing
// characters of one matched bracket pair.
type bracketPair struct {
	opener int
	closer int
}

// String renders the pair as "(opener, closer)" for debugging.
func (b *bracketPair) String() string {
	return fmt.Sprintf("(%v, %v)", b.opener, b.closer)
}

// bracketPairs is a slice of bracketPair with a sort.Interface
// implementation that orders pairs by the position of the opening bracket.
type bracketPairs []bracketPair

func (b bracketPairs) Len() int      { return len(b) }
func (b bracketPairs) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
func (b bracketPairs) Less(i, j int) bool {
	return b[i].opener < b[j].opener
}
|  | ||||
// resolvePairedBrackets runs the paired bracket part of the UBA algorithm.
//
// For each rune it consults the index into the original string, the bracket
// type (in pairTypes) and the bracket identifier (in pairValues). It also
// takes the direction type for the start-of-sentence and the embedding level.
//
// The identifiers for bracket types are the rune of the canonicalized opening
// bracket for brackets (open or close) or 0 for runes that are not brackets.
func resolvePairedBrackets(s *isolatingRunSequence) {
	p := bracketPairer{
		sos:              s.sos,
		openers:          list.New(),
		codesIsolatedRun: s.types,
		indexes:          s.indexes,
	}
	// The embedding direction follows the parity of the embedding level:
	// odd levels are right-to-left.
	dirEmbed := L
	if s.level&1 != 0 {
		dirEmbed = R
	}
	p.locateBrackets(s.p.pairTypes, s.p.pairValues)
	p.resolveBrackets(dirEmbed, s.p.initialTypes)
}
|  | ||||
// bracketPairer holds the state used to find and resolve bracket pairs
// within a single isolating run sequence.
type bracketPairer struct {
	sos Class // direction corresponding to start of sequence

	// The following is a restatement of BD 16 using non-algorithmic language.
	//
	// A bracket pair is a pair of characters consisting of an opening
	// paired bracket and a closing paired bracket such that the
	// Bidi_Paired_Bracket property value of the former equals the latter,
	// subject to the following constraints.
	// - both characters of a pair occur in the same isolating run sequence
	// - the closing character of a pair follows the opening character
	// - any bracket character can belong at most to one pair, the earliest possible one
	// - any bracket character not part of a pair is treated like an ordinary character
	// - pairs may nest properly, but their spans may not overlap otherwise

	// Bracket characters with canonical decompositions are supposed to be
	// treated as if they had been normalized, to allow normalized and non-
	// normalized text to give the same result. In this implementation that step
	// is pushed out to the caller. The caller has to ensure that the pairValue
	// slices contain the rune of the opening bracket after normalization for
	// any opening or closing bracket.

	openers *list.List // list of positions for opening brackets

	// bracket pair positions sorted by location of opening bracket
	pairPositions bracketPairs

	codesIsolatedRun []Class // directional bidi codes for an isolated run
	indexes          []int   // array of index values into the original string

}

// matchOpener reports whether characters at given positions form a matching
// bracket pair, i.e. whether they share the same canonical bracket value.
func (p *bracketPairer) matchOpener(pairValues []rune, opener, closer int) bool {
	return pairValues[p.indexes[opener]] == pairValues[p.indexes[closer]]
}

// maxPairingDepth is the maximum number of unmatched opening brackets
// tracked at once; locateBrackets stops pairing for the sequence when it
// would be exceeded.
const maxPairingDepth = 63
|  | ||||
| // locateBrackets locates matching bracket pairs according to BD16. | ||||
| // | ||||
| // This implementation uses a linked list instead of a stack, because, while | ||||
| // elements are added at the front (like a push) they are not generally removed | ||||
| // in atomic 'pop' operations, reducing the benefit of the stack archetype. | ||||
| func (p *bracketPairer) locateBrackets(pairTypes []bracketType, pairValues []rune) { | ||||
| 	// traverse the run | ||||
| 	// do that explicitly (not in a for-each) so we can record position | ||||
| 	for i, index := range p.indexes { | ||||
|  | ||||
| 		// look at the bracket type for each character | ||||
| 		if pairTypes[index] == bpNone || p.codesIsolatedRun[i] != ON { | ||||
| 			// continue scanning | ||||
| 			continue | ||||
| 		} | ||||
| 		switch pairTypes[index] { | ||||
| 		case bpOpen: | ||||
| 			// check if maximum pairing depth reached | ||||
| 			if p.openers.Len() == maxPairingDepth { | ||||
| 				p.openers.Init() | ||||
| 				return | ||||
| 			} | ||||
| 			// remember opener location, most recent first | ||||
| 			p.openers.PushFront(i) | ||||
|  | ||||
| 		case bpClose: | ||||
| 			// see if there is a match | ||||
| 			count := 0 | ||||
| 			for elem := p.openers.Front(); elem != nil; elem = elem.Next() { | ||||
| 				count++ | ||||
| 				opener := elem.Value.(int) | ||||
| 				if p.matchOpener(pairValues, opener, i) { | ||||
| 					// if the opener matches, add nested pair to the ordered list | ||||
| 					p.pairPositions = append(p.pairPositions, bracketPair{opener, i}) | ||||
| 					// remove up to and including matched opener | ||||
| 					for ; count > 0; count-- { | ||||
| 						p.openers.Remove(p.openers.Front()) | ||||
| 					} | ||||
| 					break | ||||
| 				} | ||||
| 			} | ||||
| 			sort.Sort(p.pairPositions) | ||||
| 			// if we get here, the closing bracket matched no openers | ||||
| 			// and gets ignored | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // Bracket pairs within an isolating run sequence are processed as units so | ||||
| // that both the opening and the closing paired bracket in a pair resolve to | ||||
| // the same direction. | ||||
| // | ||||
| // N0. Process bracket pairs in an isolating run sequence sequentially in | ||||
| // the logical order of the text positions of the opening paired brackets | ||||
| // using the logic given below. Within this scope, bidirectional types EN | ||||
| // and AN are treated as R. | ||||
| // | ||||
| // Identify the bracket pairs in the current isolating run sequence | ||||
| // according to BD16. For each bracket-pair element in the list of pairs of | ||||
| // text positions: | ||||
| // | ||||
| // a Inspect the bidirectional types of the characters enclosed within the | ||||
| // bracket pair. | ||||
| // | ||||
| // b If any strong type (either L or R) matching the embedding direction is | ||||
| // found, set the type for both brackets in the pair to match the embedding | ||||
| // direction. | ||||
| // | ||||
| // o [ e ] o -> o e e e o | ||||
| // | ||||
| // o [ o e ] -> o e o e e | ||||
| // | ||||
| // o [ NI e ] -> o e NI e e | ||||
| // | ||||
| // c Otherwise, if a strong type (opposite the embedding direction) is | ||||
| // found, test for adjacent strong types as follows: 1 First, check | ||||
| // backwards before the opening paired bracket until the first strong type | ||||
| // (L, R, or sos) is found. If that first preceding strong type is opposite | ||||
| // the embedding direction, then set the type for both brackets in the pair | ||||
| // to that type. 2 Otherwise, set the type for both brackets in the pair to | ||||
| // the embedding direction. | ||||
| // | ||||
| // o [ o ] e -> o o o o e | ||||
| // | ||||
| // o [ o NI ] o -> o o o NI o o | ||||
| // | ||||
| // e [ o ] o -> e e o e o | ||||
| // | ||||
| // e [ o ] e -> e e o e e | ||||
| // | ||||
| // e ( o [ o ] NI ) e -> e e o o o o NI e e | ||||
| // | ||||
| // d Otherwise, do not set the type for the current bracket pair. Note that | ||||
| // if the enclosed text contains no strong types the paired brackets will | ||||
| // both resolve to the same level when resolved individually using rules N1 | ||||
| // and N2. | ||||
| // | ||||
| // e ( NI ) o -> e ( NI ) o | ||||
|  | ||||
| // getStrongTypeN0 maps character's directional code to strong type as required | ||||
| // by rule N0. | ||||
| // | ||||
| // TODO: have separate type for "strong" directionality. | ||||
| func (p *bracketPairer) getStrongTypeN0(index int) Class { | ||||
| 	switch p.codesIsolatedRun[index] { | ||||
| 	// in the scope of N0, number types are treated as R | ||||
| 	case EN, AN, AL, R: | ||||
| 		return R | ||||
| 	case L: | ||||
| 		return L | ||||
| 	default: | ||||
| 		return ON | ||||
| 	} | ||||
| } | ||||
|  | ||||
// classifyPairContent reports the strong types contained inside a Bracket Pair,
// assuming the given embedding direction.
//
// It returns ON if no strong type is found. If a single strong type is found,
// it returns this type. Otherwise it returns the embedding direction.
//
// TODO: use separate type for "strong" directionality.
func (p *bracketPairer) classifyPairContent(loc bracketPair, dirEmbed Class) Class {
	dirOpposite := ON
	for i := loc.opener + 1; i < loc.closer; i++ {
		dir := p.getStrongTypeN0(i)
		if dir == ON {
			// not a strong type; keep scanning
			continue
		}
		if dir == dirEmbed {
			return dir // type matching embedding direction found
		}
		// remember that a strong type opposite to dirEmbed was seen
		dirOpposite = dir
	}
	// return ON if no strong type found, or class opposite to dirEmbed
	return dirOpposite
}
|  | ||||
| // classBeforePair determines which strong types are present before a Bracket | ||||
| // Pair. Return R or L if strong type found, otherwise ON. | ||||
| func (p *bracketPairer) classBeforePair(loc bracketPair) Class { | ||||
| 	for i := loc.opener - 1; i >= 0; i-- { | ||||
| 		if dir := p.getStrongTypeN0(i); dir != ON { | ||||
| 			return dir | ||||
| 		} | ||||
| 	} | ||||
| 	// no strong types found, return sos | ||||
| 	return p.sos | ||||
| } | ||||
|  | ||||
// assignBracketType implements rule N0 for a single bracket pair.
//
// dirEmbed is the embedding direction of the isolating run sequence;
// initialTypes holds the original, unresolved class of every character and is
// used when propagating the resolved type onto trailing nonspacing marks.
func (p *bracketPairer) assignBracketType(loc bracketPair, dirEmbed Class, initialTypes []Class) {
	// rule "N0, a", inspect contents of pair
	dirPair := p.classifyPairContent(loc, dirEmbed)

	// dirPair is now L, R, or N (no strong type found)

	// the following logical tests are performed out of order compared to
	// the statement of the rules but yield the same results
	if dirPair == ON {
		return // case "d" - nothing to do
	}

	if dirPair != dirEmbed {
		// case "c": strong type found, opposite - check before (c.1)
		dirPair = p.classBeforePair(loc)
		if dirPair == dirEmbed || dirPair == ON {
			// no strong opposite type found before - use embedding (c.2)
			dirPair = dirEmbed
		}
	}
	// else: case "b", strong type found matching embedding,
	// no explicit action needed, as dirPair is already set to embedding
	// direction

	// set the bracket types to the type found
	p.setBracketsToType(loc, dirPair, initialTypes)
}
|  | ||||
// setBracketsToType sets both brackets of the pair to dirPair and, per rule
// N0, also gives any nonspacing marks (NSM in the initial types) that
// immediately follow either bracket the same resolved type.
func (p *bracketPairer) setBracketsToType(loc bracketPair, dirPair Class, initialTypes []Class) {
	p.codesIsolatedRun[loc.opener] = dirPair
	p.codesIsolatedRun[loc.closer] = dirPair

	// Propagate onto NSMs directly following the opening bracket; stop at
	// the first character that was not originally an NSM.
	for i := loc.opener + 1; i < loc.closer; i++ {
		index := p.indexes[i]
		if initialTypes[index] != NSM {
			break
		}
		p.codesIsolatedRun[i] = dirPair
	}

	// Propagate onto NSMs directly following the closing bracket.
	for i := loc.closer + 1; i < len(p.indexes); i++ {
		index := p.indexes[i]
		if initialTypes[index] != NSM {
			break
		}
		p.codesIsolatedRun[i] = dirPair
	}
}
|  | ||||
| // resolveBrackets implements rule N0 for a list of pairs. | ||||
| func (p *bracketPairer) resolveBrackets(dirEmbed Class, initialTypes []Class) { | ||||
| 	for _, loc := range p.pairPositions { | ||||
| 		p.assignBracketType(loc, dirEmbed, initialTypes) | ||||
| 	} | ||||
| } | ||||
							
								
								
									
										1058
									
								
								vendor/golang.org/x/text/unicode/bidi/core.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										1058
									
								
								vendor/golang.org/x/text/unicode/bidi/core.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
										
											
												File diff suppressed because it is too large
												Load Diff
											
										
									
								
							
							
								
								
									
										133
									
								
								vendor/golang.org/x/text/unicode/bidi/gen.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										133
									
								
								vendor/golang.org/x/text/unicode/bidi/gen.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,133 @@ | ||||
| // Copyright 2015 The Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| // +build ignore | ||||
|  | ||||
| package main | ||||
|  | ||||
| import ( | ||||
| 	"flag" | ||||
| 	"log" | ||||
|  | ||||
| 	"golang.org/x/text/internal/gen" | ||||
| 	"golang.org/x/text/internal/triegen" | ||||
| 	"golang.org/x/text/internal/ucd" | ||||
| ) | ||||
|  | ||||
// outputFile is the destination of the generated trie tables.
var outputFile = flag.String("out", "tables.go", "output file")

// main generates the bidi lookup tables and repackages the shared gen_*
// sources into the bidi package.
func main() {
	gen.Init()
	// Copy the shared declarations into the bidi package proper.
	gen.Repackage("gen_trieval.go", "trieval.go", "bidi")
	gen.Repackage("gen_ranges.go", "ranges_test.go", "bidi")

	genTables()
}
|  | ||||
// bidiClass names and codes taken from class "bc" in
// http://www.unicode.org/Public/8.0.0/ucd/PropertyValueAliases.txt
//
// The nine explicit directional-formatting characters are all folded into
// the single Control class here; their specific class is recovered at
// lookup time from the rune value itself (see prop.go).
var bidiClass = map[string]Class{
	"AL":  AL,  // ArabicLetter
	"AN":  AN,  // ArabicNumber
	"B":   B,   // ParagraphSeparator
	"BN":  BN,  // BoundaryNeutral
	"CS":  CS,  // CommonSeparator
	"EN":  EN,  // EuropeanNumber
	"ES":  ES,  // EuropeanSeparator
	"ET":  ET,  // EuropeanTerminator
	"L":   L,   // LeftToRight
	"NSM": NSM, // NonspacingMark
	"ON":  ON,  // OtherNeutral
	"R":   R,   // RightToLeft
	"S":   S,   // SegmentSeparator
	"WS":  WS,  // WhiteSpace

	"FSI": Control,
	"PDF": Control,
	"PDI": Control,
	"LRE": Control,
	"LRI": Control,
	"LRO": Control,
	"RLE": Control,
	"RLI": Control,
	"RLO": Control,
}
|  | ||||
// genTables writes the bidi lookup trie and the bracket xorMasks table to the
// output file. A trie entry packs the Class in bits 3..0, the bracket-open
// flag in bit 4, and an index into xorMasks in bits 7..5.
func genTables() {
	// The class must fit in the low 4 bits of a trie entry.
	if numClass > 0x0F {
		log.Fatalf("Too many Class constants (%#x > 0x0F).", numClass)
	}
	w := gen.NewCodeWriter()
	defer w.WriteGoFile(*outputFile, "bidi")

	gen.WriteUnicodeVersion(w)

	t := triegen.NewTrie("bidi")

	// Build data about bracket mapping. These bits need to be or-ed with
	// any other bits.
	orMask := map[rune]uint64{}

	xorMap := map[rune]int{}
	xorMasks := []rune{0} // First value is no-op.

	ucd.Parse(gen.OpenUCDFile("BidiBrackets.txt"), func(p *ucd.Parser) {
		r1 := p.Rune(0)
		r2 := p.Rune(1)
		// A bracket and its paired bracket are related by an XOR mask,
		// which is deduplicated across all pairs and stored by index.
		xor := r1 ^ r2
		if _, ok := xorMap[xor]; !ok {
			xorMap[xor] = len(xorMasks)
			xorMasks = append(xorMasks, xor)
		}
		entry := uint64(xorMap[xor]) << xorMaskShift
		switch p.String(2) {
		case "o":
			entry |= openMask
		case "c", "n":
			// "c" (close) and "n" entries get no open flag.
		default:
			log.Fatalf("Unknown bracket class %q.", p.String(2))
		}
		orMask[r1] = entry
	})

	w.WriteComment(`
	xorMasks contains masks to be xor-ed with brackets to get the reverse
	version.`)
	w.WriteVar("xorMasks", xorMasks)

	done := map[rune]bool{}

	// insert adds r to the trie at most once; the first insertion wins, so
	// explicit UCD entries take precedence over the defaults visited below.
	insert := func(r rune, c Class) {
		if !done[r] {
			t.Insert(r, orMask[r]|uint64(c))
			done[r] = true
		}
	}

	// Insert the derived BiDi properties.
	ucd.Parse(gen.OpenUCDFile("extracted/DerivedBidiClass.txt"), func(p *ucd.Parser) {
		r := p.Rune(0)
		class, ok := bidiClass[p.String(1)]
		if !ok {
			log.Fatalf("%U: Unknown BiDi class %q", r, p.String(1))
		}
		insert(r, class)
	})
	visitDefaults(insert)

	// TODO: use sparse blocks. This would reduce table size considerably
	// from the looks of it.

	sz, err := t.Gen(w)
	if err != nil {
		log.Fatal(err)
	}
	w.Size += sz
}
|  | ||||
| // dummy values to make methods in gen_common compile. The real versions | ||||
| // will be generated by this file to tables.go. | ||||
| var ( | ||||
| 	xorMasks []rune | ||||
| ) | ||||
							
								
								
									
										57
									
								
								vendor/golang.org/x/text/unicode/bidi/gen_ranges.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										57
									
								
								vendor/golang.org/x/text/unicode/bidi/gen_ranges.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,57 @@ | ||||
| // Copyright 2015 The Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| // +build ignore | ||||
|  | ||||
| package main | ||||
|  | ||||
| import ( | ||||
| 	"unicode" | ||||
|  | ||||
| 	"golang.org/x/text/internal/gen" | ||||
| 	"golang.org/x/text/internal/ucd" | ||||
| 	"golang.org/x/text/unicode/rangetable" | ||||
| ) | ||||
|  | ||||
// These tables are hand-extracted from:
// http://www.unicode.org/Public/8.0.0/ucd/extracted/DerivedBidiClass.txt

// visitDefaults calls fn with the default BiDi class for every rune that is
// assigned one by default: the AL, R, and ET range defaults, noncharacters
// (BN), and Default_Ignorable_Code_Points (BN).
func visitDefaults(fn func(r rune, c Class)) {
	// first write default values for ranges listed above.
	visitRunes(fn, AL, []rune{
		0x0600, 0x07BF, // Arabic
		0x08A0, 0x08FF, // Arabic Extended-A
		0xFB50, 0xFDCF, // Arabic Presentation Forms
		0xFDF0, 0xFDFF,
		0xFE70, 0xFEFF,
		0x0001EE00, 0x0001EEFF, // Arabic Mathematical Alpha Symbols
	})
	visitRunes(fn, R, []rune{
		0x0590, 0x05FF, // Hebrew
		0x07C0, 0x089F, // Nko et al.
		0xFB1D, 0xFB4F,
		0x00010800, 0x00010FFF, // Cypriot Syllabary et. al.
		0x0001E800, 0x0001EDFF,
		0x0001EF00, 0x0001EFFF,
	})
	visitRunes(fn, ET, []rune{ // European Terminator
		0x20A0, 0x20Cf, // Currency symbols
	})
	// Noncharacters default to Boundary Neutral.
	rangetable.Visit(unicode.Noncharacter_Code_Point, func(r rune) {
		fn(r, BN) // Boundary Neutral
	})
	// Default-ignorable code points also default to Boundary Neutral.
	ucd.Parse(gen.OpenUCDFile("DerivedCoreProperties.txt"), func(p *ucd.Parser) {
		if p.String(1) == "Default_Ignorable_Code_Point" {
			fn(p.Rune(0), BN) // Boundary Neutral
		}
	})
}
|  | ||||
| func visitRunes(fn func(r rune, c Class), c Class, runes []rune) { | ||||
| 	for i := 0; i < len(runes); i += 2 { | ||||
| 		lo, hi := runes[i], runes[i+1] | ||||
| 		for j := lo; j <= hi; j++ { | ||||
| 			fn(j, c) | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
							
								
								
									
										64
									
								
								vendor/golang.org/x/text/unicode/bidi/gen_trieval.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										64
									
								
								vendor/golang.org/x/text/unicode/bidi/gen_trieval.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,64 @@ | ||||
| // Copyright 2015 The Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| // +build ignore | ||||
|  | ||||
| package main | ||||
|  | ||||
// Class is the Unicode BiDi class. Each rune has a single class.
type Class uint

const (
	L       Class = iota // LeftToRight
	R                    // RightToLeft
	EN                   // EuropeanNumber
	ES                   // EuropeanSeparator
	ET                   // EuropeanTerminator
	AN                   // ArabicNumber
	CS                   // CommonSeparator
	B                    // ParagraphSeparator
	S                    // SegmentSeparator
	WS                   // WhiteSpace
	ON                   // OtherNeutral
	BN                   // BoundaryNeutral
	NSM                  // NonspacingMark
	AL                   // ArabicLetter
	Control              // Control LRO - PDI

	// numClass counts the classes stored directly in a trie entry; the
	// generator verifies it fits in 4 bits (numClass <= 0x0F, checked in
	// genTables).
	numClass

	// The explicit formatting classes below do not fit in a trie entry;
	// they are represented there as Control and resolved from the rune
	// value itself.
	LRO // LeftToRightOverride
	RLO // RightToLeftOverride
	LRE // LeftToRightEmbedding
	RLE // RightToLeftEmbedding
	PDF // PopDirectionalFormat
	LRI // LeftToRightIsolate
	RLI // RightToLeftIsolate
	FSI // FirstStrongIsolate
	PDI // PopDirectionalIsolate

	unknownClass = ^Class(0) // sentinel: class not known
)

// controlToClass maps each explicit directional formatting rune to its
// specific class.
var controlToClass = map[rune]Class{
	0x202D: LRO, // LeftToRightOverride,
	0x202E: RLO, // RightToLeftOverride,
	0x202A: LRE, // LeftToRightEmbedding,
	0x202B: RLE, // RightToLeftEmbedding,
	0x202C: PDF, // PopDirectionalFormat,
	0x2066: LRI, // LeftToRightIsolate,
	0x2067: RLI, // RightToLeftIsolate,
	0x2068: FSI, // FirstStrongIsolate,
	0x2069: PDI, // PopDirectionalIsolate,
}

// A trie entry has the following bits:
// 7..5  XOR mask for brackets
// 4     1: Bracket open, 0: Bracket close
// 3..0  Class type

const (
	openMask     = 0x10 // bit 4: set for opening brackets
	xorMaskShift = 5    // bits 7..5: index into xorMasks
)
							
								
								
									
										206
									
								
								vendor/golang.org/x/text/unicode/bidi/prop.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										206
									
								
								vendor/golang.org/x/text/unicode/bidi/prop.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,206 @@ | ||||
| // Copyright 2016 The Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| package bidi | ||||
|  | ||||
| import "unicode/utf8" | ||||
|  | ||||
// Properties provides access to BiDi properties of runes.
type Properties struct {
	entry uint8 // trie value: Class in bits 3..0, bracket-open flag in bit 4, xor-mask index in bits 7..5
	last  uint8 // last byte of the rune's UTF-8 encoding; set for 3-byte runes only, used to resolve Control classes
}

// trie is the generated lookup structure for non-ASCII runes.
var trie = newBidiTrie(0)
|  | ||||
| // TODO: using this for bidirule reduces the running time by about 5%. Consider | ||||
| // if this is worth exposing or if we can find a way to speed up the Class | ||||
| // method. | ||||
| // | ||||
| // // CompactClass is like Class, but maps all of the BiDi control classes | ||||
| // // (LRO, RLO, LRE, RLE, PDF, LRI, RLI, FSI, PDI) to the class Control. | ||||
| // func (p Properties) CompactClass() Class { | ||||
| // 	return Class(p.entry & 0x0F) | ||||
| // } | ||||
|  | ||||
// Class returns the Bidi class for p.
//
// Runes stored in the trie as Control have their specific class (LRO, RLO,
// ..., PDI) recovered from the low nibble of the last byte of their UTF-8
// encoding via controlByteToClass.
func (p Properties) Class() Class {
	c := Class(p.entry & 0x0F)
	if c == Control {
		c = controlByteToClass[p.last&0xF]
	}
	return c
}
|  | ||||
// IsBracket reports whether the rune is a bracket.
// A rune is a bracket iff any of the bracket bits of its trie entry (the
// open flag in bit 4 or the xor-mask index in bits 7..5) are set.
func (p Properties) IsBracket() bool { return p.entry&0xF0 != 0 }

// IsOpeningBracket reports whether the rune is an opening bracket.
// IsBracket must return true.
func (p Properties) IsOpeningBracket() bool { return p.entry&openMask != 0 }

// reverseBracket returns the bracket that pairs with r by applying the XOR
// mask indexed by the top three bits of the trie entry.
//
// TODO: find a better API and expose.
func (p Properties) reverseBracket(r rune) rune {
	return xorMasks[p.entry>>xorMaskShift] ^ r
}
|  | ||||
// controlByteToClass maps the low nibble of the last UTF-8 byte of a BiDi
// control rune to its specific class; see Properties.Class.
var controlByteToClass = [16]Class{
	0xD: LRO, // U+202D LeftToRightOverride,
	0xE: RLO, // U+202E RightToLeftOverride,
	0xA: LRE, // U+202A LeftToRightEmbedding,
	0xB: RLE, // U+202B RightToLeftEmbedding,
	0xC: PDF, // U+202C PopDirectionalFormat,
	0x6: LRI, // U+2066 LeftToRightIsolate,
	0x7: RLI, // U+2067 RightToLeftIsolate,
	0x8: FSI, // U+2068 FirstStrongIsolate,
	0x9: PDI, // U+2069 PopDirectionalIsolate,
}
|  | ||||
| // LookupRune returns properties for r. | ||||
| func LookupRune(r rune) (p Properties, size int) { | ||||
| 	var buf [4]byte | ||||
| 	n := utf8.EncodeRune(buf[:], r) | ||||
| 	return Lookup(buf[:n]) | ||||
| } | ||||
|  | ||||
| // TODO: these lookup methods are based on the generated trie code. The returned | ||||
| // sizes have slightly different semantics from the generated code, in that it | ||||
| // always returns size==1 for an illegal UTF-8 byte (instead of the length | ||||
| // of the maximum invalid subsequence). Most Transformers, like unicode/norm, | ||||
| // leave invalid UTF-8 untouched, in which case it has performance benefits to | ||||
| // do so (without changing the semantics). Bidi requires the semantics used here | ||||
| // for the bidirule implementation to be compatible with the Go semantics. | ||||
| //  They ultimately should perhaps be adopted by all trie implementations, for | ||||
| // convenience sake. | ||||
| // This unrolled code also boosts performance of the secure/bidirule package by | ||||
| // about 30%. | ||||
| // So, to remove this code: | ||||
| //   - add option to trie generator to define return type. | ||||
| //   - always return 1 byte size for ill-formed UTF-8 runes. | ||||
|  | ||||
// Lookup returns properties for the first rune in s and the width in bytes of
// its encoding. The size will be 0 if s does not hold enough bytes to complete
// the encoding.
//
// Ill-formed UTF-8 (a bad lead byte or an out-of-range continuation byte)
// yields zero Properties with size 1; see the note above on why this differs
// from the generated trie code.
func Lookup(s []byte) (p Properties, sz int) {
	c0 := s[0]
	switch {
	case c0 < 0x80: // is ASCII
		return Properties{entry: bidiValues[c0]}, 1
	case c0 < 0xC2:
		// 0x80..0xC1: a stray continuation byte or an overlong-encoding
		// lead byte; either way an illegal start of a rune.
		return Properties{}, 1
	case c0 < 0xE0: // 2-byte UTF-8
		if len(s) < 2 {
			return Properties{}, 0
		}
		i := bidiIndex[c0]
		c1 := s[1]
		if c1 < 0x80 || 0xC0 <= c1 {
			// not a continuation byte (must be 0x80..0xBF)
			return Properties{}, 1
		}
		return Properties{entry: trie.lookupValue(uint32(i), c1)}, 2
	case c0 < 0xF0: // 3-byte UTF-8
		if len(s) < 3 {
			return Properties{}, 0
		}
		i := bidiIndex[c0]
		c1 := s[1]
		if c1 < 0x80 || 0xC0 <= c1 {
			return Properties{}, 1
		}
		o := uint32(i)<<6 + uint32(c1)
		i = bidiIndex[o]
		c2 := s[2]
		if c2 < 0x80 || 0xC0 <= c2 {
			return Properties{}, 1
		}
		// last is recorded so Class can resolve Control runes (all of
		// which encode as three bytes) to their specific class.
		return Properties{entry: trie.lookupValue(uint32(i), c2), last: c2}, 3
	case c0 < 0xF8: // 4-byte UTF-8
		if len(s) < 4 {
			return Properties{}, 0
		}
		i := bidiIndex[c0]
		c1 := s[1]
		if c1 < 0x80 || 0xC0 <= c1 {
			return Properties{}, 1
		}
		o := uint32(i)<<6 + uint32(c1)
		i = bidiIndex[o]
		c2 := s[2]
		if c2 < 0x80 || 0xC0 <= c2 {
			return Properties{}, 1
		}
		o = uint32(i)<<6 + uint32(c2)
		i = bidiIndex[o]
		c3 := s[3]
		if c3 < 0x80 || 0xC0 <= c3 {
			return Properties{}, 1
		}
		return Properties{entry: trie.lookupValue(uint32(i), c3)}, 4
	}
	// Illegal rune
	return Properties{}, 1
}
|  | ||||
// LookupString returns properties for the first rune in s and the width in
// bytes of its encoding. The size will be 0 if s does not hold enough bytes to
// complete the encoding.
//
// This is a byte-for-byte mirror of Lookup for string input; see the comments
// there for the handling of ill-formed UTF-8.
func LookupString(s string) (p Properties, sz int) {
	c0 := s[0]
	switch {
	case c0 < 0x80: // is ASCII
		return Properties{entry: bidiValues[c0]}, 1
	case c0 < 0xC2:
		// stray continuation byte or overlong-encoding lead byte
		return Properties{}, 1
	case c0 < 0xE0: // 2-byte UTF-8
		if len(s) < 2 {
			return Properties{}, 0
		}
		i := bidiIndex[c0]
		c1 := s[1]
		if c1 < 0x80 || 0xC0 <= c1 {
			return Properties{}, 1
		}
		return Properties{entry: trie.lookupValue(uint32(i), c1)}, 2
	case c0 < 0xF0: // 3-byte UTF-8
		if len(s) < 3 {
			return Properties{}, 0
		}
		i := bidiIndex[c0]
		c1 := s[1]
		if c1 < 0x80 || 0xC0 <= c1 {
			return Properties{}, 1
		}
		o := uint32(i)<<6 + uint32(c1)
		i = bidiIndex[o]
		c2 := s[2]
		if c2 < 0x80 || 0xC0 <= c2 {
			return Properties{}, 1
		}
		// last lets Class resolve Control runes to their specific class.
		return Properties{entry: trie.lookupValue(uint32(i), c2), last: c2}, 3
	case c0 < 0xF8: // 4-byte UTF-8
		if len(s) < 4 {
			return Properties{}, 0
		}
		i := bidiIndex[c0]
		c1 := s[1]
		if c1 < 0x80 || 0xC0 <= c1 {
			return Properties{}, 1
		}
		o := uint32(i)<<6 + uint32(c1)
		i = bidiIndex[o]
		c2 := s[2]
		if c2 < 0x80 || 0xC0 <= c2 {
			return Properties{}, 1
		}
		o = uint32(i)<<6 + uint32(c2)
		i = bidiIndex[o]
		c3 := s[3]
		if c3 < 0x80 || 0xC0 <= c3 {
			return Properties{}, 1
		}
		return Properties{entry: trie.lookupValue(uint32(i), c3)}, 4
	}
	// Illegal rune
	return Properties{}, 1
}
							
								
								
									
										1779
									
								
								vendor/golang.org/x/text/unicode/bidi/tables.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										1779
									
								
								vendor/golang.org/x/text/unicode/bidi/tables.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
										
											
												File diff suppressed because it is too large
												Load Diff
											
										
									
								
							
							
								
								
									
										60
									
								
								vendor/golang.org/x/text/unicode/bidi/trieval.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										60
									
								
								vendor/golang.org/x/text/unicode/bidi/trieval.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,60 @@ | ||||
| // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. | ||||
|  | ||||
| package bidi | ||||
|  | ||||
| // Class is the Unicode BiDi class. Each rune has a single class. | ||||
| type Class uint | ||||
|  | ||||
| const ( | ||||
| 	L       Class = iota // LeftToRight | ||||
| 	R                    // RightToLeft | ||||
| 	EN                   // EuropeanNumber | ||||
| 	ES                   // EuropeanSeparator | ||||
| 	ET                   // EuropeanTerminator | ||||
| 	AN                   // ArabicNumber | ||||
| 	CS                   // CommonSeparator | ||||
| 	B                    // ParagraphSeparator | ||||
| 	S                    // SegmentSeparator | ||||
| 	WS                   // WhiteSpace | ||||
| 	ON                   // OtherNeutral | ||||
| 	BN                   // BoundaryNeutral | ||||
| 	NSM                  // NonspacingMark | ||||
| 	AL                   // ArabicLetter | ||||
| 	Control              // Control LRO - PDI | ||||
|  | ||||
| 	numClass | ||||
|  | ||||
| 	LRO // LeftToRightOverride | ||||
| 	RLO // RightToLeftOverride | ||||
| 	LRE // LeftToRightEmbedding | ||||
| 	RLE // RightToLeftEmbedding | ||||
| 	PDF // PopDirectionalFormat | ||||
| 	LRI // LeftToRightIsolate | ||||
| 	RLI // RightToLeftIsolate | ||||
| 	FSI // FirstStrongIsolate | ||||
| 	PDI // PopDirectionalIsolate | ||||
|  | ||||
| 	unknownClass = ^Class(0) | ||||
| ) | ||||
|  | ||||
| var controlToClass = map[rune]Class{ | ||||
| 	0x202D: LRO, // LeftToRightOverride, | ||||
| 	0x202E: RLO, // RightToLeftOverride, | ||||
| 	0x202A: LRE, // LeftToRightEmbedding, | ||||
| 	0x202B: RLE, // RightToLeftEmbedding, | ||||
| 	0x202C: PDF, // PopDirectionalFormat, | ||||
| 	0x2066: LRI, // LeftToRightIsolate, | ||||
| 	0x2067: RLI, // RightToLeftIsolate, | ||||
| 	0x2068: FSI, // FirstStrongIsolate, | ||||
| 	0x2069: PDI, // PopDirectionalIsolate, | ||||
| } | ||||
|  | ||||
| // A trie entry has the following bits: | ||||
| // 7..5  XOR mask for brackets | ||||
| // 4     1: Bracket open, 0: Bracket close | ||||
| // 3..0  Class type | ||||
|  | ||||
| const ( | ||||
| 	openMask     = 0x10 | ||||
| 	xorMaskShift = 5 | ||||
| ) | ||||
							
								
								
									
										100
									
								
								vendor/golang.org/x/text/unicode/cldr/base.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										100
									
								
								vendor/golang.org/x/text/unicode/cldr/base.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,100 @@ | ||||
| // Copyright 2013 The Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| package cldr | ||||
|  | ||||
| import ( | ||||
| 	"encoding/xml" | ||||
| 	"regexp" | ||||
| 	"strconv" | ||||
| ) | ||||
|  | ||||
// Elem is implemented by every XML element.
type Elem interface {
	setEnclosing(Elem) // record the enclosing (parent) element
	setName(string)    // record the element's tag name
	enclosing() Elem   // return the enclosing element

	// GetCommon returns the Common part shared by all elements.
	GetCommon() *Common
}
|  | ||||
// hidden holds character data and sub-elements shared by all XML elements.
// It is embedded in Common but kept out of Common's exported field set.
type hidden struct {
	CharData string `xml:",chardata"`
	// Alias captures an <alias> sub-element, which redirects this element
	// to another source/path.
	Alias *struct {
		Common
		Source string `xml:"source,attr"`
		Path   string `xml:"path,attr"`
	} `xml:"alias"`
	// Def captures a <default> sub-element, selecting a default choice or
	// type among the enclosed elements; see Common.Default.
	Def *struct {
		Common
		Choice string `xml:"choice,attr,omitempty"`
		Type   string `xml:"type,attr,omitempty"`
	} `xml:"default"`
}
|  | ||||
// Common holds several of the most common attributes and sub elements
// of an XML element.
type Common struct {
	XMLName         xml.Name
	name            string // element tag name, recorded via setName
	enclElem        Elem   // enclosing (parent) element, recorded via setEnclosing
	Type            string `xml:"type,attr,omitempty"`
	Reference       string `xml:"reference,attr,omitempty"`
	Alt             string `xml:"alt,attr,omitempty"`
	ValidSubLocales string `xml:"validSubLocales,attr,omitempty"`
	Draft           string `xml:"draft,attr,omitempty"`
	hidden
}
|  | ||||
| // Default returns the default type to select from the enclosed list | ||||
| // or "" if no default value is specified. | ||||
| func (e *Common) Default() string { | ||||
| 	if e.Def == nil { | ||||
| 		return "" | ||||
| 	} | ||||
| 	if e.Def.Choice != "" { | ||||
| 		return e.Def.Choice | ||||
| 	} else if e.Def.Type != "" { | ||||
| 		// Type is still used by the default element in collation. | ||||
| 		return e.Def.Type | ||||
| 	} | ||||
| 	return "" | ||||
| } | ||||
|  | ||||
// GetCommon returns e itself. It is provided such that Common, and hence
// every element embedding it, implements the Elem interface.
func (e *Common) GetCommon() *Common {
	return e
}
|  | ||||
// Data returns the character data accumulated for this element, with all
// escape sequences matched by charRe (XML numeric references and \u/\U/\x,
// octal and single-letter escapes) expanded in place. The expansion is
// cached back into CharData, so repeated calls are cheap.
func (e *Common) Data() string {
	e.CharData = charRe.ReplaceAllStringFunc(e.CharData, replaceUnicode)
	return e.CharData
}
|  | ||||
// setName records the XML tag name of this element.
func (e *Common) setName(s string) {
	e.name = s
}

// enclosing returns the element that lexically encloses this one, or nil.
func (e *Common) enclosing() Elem {
	return e.enclElem
}

// setEnclosing records the element that lexically encloses this one.
func (e *Common) setEnclosing(en Elem) {
	e.enclElem = en
}
|  | ||||
// charRe matches escape sequences that can be expanded without further
// escaping of the string: XML numeric character references (&#x...;) and
// backslash escapes (\uNNNN, \UNNNNNNNN, \xNN, three-digit octal, and the
// single-letter escapes \a \b \t \n \v \f \r).
var charRe = regexp.MustCompile(`&#x[0-9a-fA-F]*;|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|\\x[0-9a-fA-F]{2}|\\[0-7]{3}|\\[abtnvfr]`)
|  | ||||
// replaceUnicode converts a single escape sequence, as matched by charRe,
// to the one-rune string it denotes. It assumes the input is correctly
// formatted; parse errors are deliberately ignored.
func replaceUnicode(s string) string {
	if s[1] == '#' {
		// XML numeric character reference of the form &#xNNNN;.
		r, _ := strconv.ParseInt(s[3:len(s)-1], 16, 32)
		// Convert through rune explicitly: string(int64) is the
		// vet-flagged integer-to-string conversion.
		return string(rune(r))
	}
	// Backslash escape (\uNNNN, \UNNNNNNNN, \xNN, octal, or single letter).
	r, _, _, _ := strconv.UnquoteChar(s, 0)
	return string(r)
}
							
								
								
									
										130
									
								
								vendor/golang.org/x/text/unicode/cldr/cldr.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										130
									
								
								vendor/golang.org/x/text/unicode/cldr/cldr.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,130 @@ | ||||
| // Copyright 2013 The Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| //go:generate go run makexml.go -output xml.go | ||||
|  | ||||
| // Package cldr provides a parser for LDML and related XML formats. | ||||
| // This package is intended to be used by the table generation tools | ||||
| // for the various internationalization-related packages. | ||||
| // As the XML types are generated from the CLDR DTD, and as the CLDR standard | ||||
| // is periodically amended, this package may change considerably over time. | ||||
| // This mostly means that data may appear and disappear between versions. | ||||
| // That is, old code should keep compiling for newer versions, but data | ||||
| // may have moved or changed. | ||||
| // CLDR version 22 is the first version supported by this package. | ||||
| // Older versions may not work. | ||||
| package cldr // import "golang.org/x/text/unicode/cldr" | ||||
|  | ||||
| import ( | ||||
| 	"fmt" | ||||
| 	"sort" | ||||
| ) | ||||
|  | ||||
// CLDR provides access to parsed data of the Unicode Common Locale Data Repository.
type CLDR struct {
	parent   map[string][]string // per-locale parent chains; presumably filled during resolution — confirm
	locale   map[string]*LDML    // raw per-locale data as decoded from XML
	resolved map[string]*LDML    // cache of fully resolved locales (see LDML)
	bcp47    *LDMLBCP47          // data decoded from the bcp47 directory
	supp     *SupplementalData   // data decoded from the supplemental directory
}
|  | ||||
// makeCLDR returns an empty CLDR with all maps and sub structures
// initialized, ready to be populated by a Decoder.
func makeCLDR() *CLDR {
	return &CLDR{
		parent:   make(map[string][]string),
		locale:   make(map[string]*LDML),
		resolved: make(map[string]*LDML),
		bcp47:    &LDMLBCP47{},
		supp:     &SupplementalData{},
	}
}
|  | ||||
| // BCP47 returns the parsed BCP47 LDML data. If no such data was parsed, nil is returned. | ||||
| func (cldr *CLDR) BCP47() *LDMLBCP47 { | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
// Draft indicates the draft level of an element: its approval status within
// CLDR. Lower values indicate a higher level of approval.
type Draft int

const (
	Approved Draft = iota
	Contributed
	Provisional
	Unconfirmed
)

// drafts lists the level names such that drafts[i] parses to
// Unconfirmed - i. The trailing "" entry is never matched by ParseDraft
// (the empty string is handled up front).
var drafts = []string{"unconfirmed", "provisional", "contributed", "approved", ""}

// ParseDraft returns the Draft value corresponding to the given string. The
// empty string corresponds to Approved.
func ParseDraft(level string) (Draft, error) {
	if level == "" {
		return Approved, nil
	}
	for i, s := range drafts {
		if level == s {
			return Unconfirmed - Draft(i), nil
		}
	}
	return Approved, fmt.Errorf("cldr: unknown draft level %q", level)
}

// String returns the lowercase name of the draft level; it is the inverse
// of ParseDraft.
func (d Draft) String() string {
	// The previous indexing, drafts[len(drafts)-1-int(d)], was off by one:
	// it mapped Contributed to "approved", Provisional to "contributed"
	// and Unconfirmed to "provisional", so ParseDraft(d.String()) != d for
	// every level except Approved.
	return drafts[Unconfirmed-d]
}
|  | ||||
// SetDraftLevel sets which draft levels to include in the evaluated LDML.
// Any draft element for which the draft level is higher than lev will be excluded.
// If multiple draft levels are available for a single element, the one with the
// lowest draft level will be selected, unless preferDraft is true, in which case
// the highest draft will be chosen.
// It is assumed that the underlying LDML is canonicalized.
//
// NOTE(review): the filtering itself is not implemented yet (see TODO);
// currently this only invalidates the cache of resolved locales.
func (cldr *CLDR) SetDraftLevel(lev Draft, preferDraft bool) {
	// TODO: implement
	cldr.resolved = make(map[string]*LDML)
}
|  | ||||
// RawLDML returns the LDML XML for loc in unresolved form.
// loc must be one of the strings returned by Locales.
// (The original comment referred to a parameter "id"; the parameter is loc.)
func (cldr *CLDR) RawLDML(loc string) *LDML {
	return cldr.locale[loc]
}
|  | ||||
// LDML returns the fully resolved LDML XML for loc, which must be one of
// the strings returned by Locales. Resolution (inheritance, aliasing) is
// performed by the resolve method, defined elsewhere in this package.
func (cldr *CLDR) LDML(loc string) (*LDML, error) {
	return cldr.resolve(loc)
}
|  | ||||
// Supplemental returns the parsed supplemental data. For a CLDR built by a
// Decoder the value is always non-nil (allocated by makeCLDR); it is empty
// if no supplemental data was decoded.
func (cldr *CLDR) Supplemental() *SupplementalData {
	return cldr.supp
}
|  | ||||
| // Locales returns the locales for which there exist files. | ||||
| // Valid sublocales for which there is no file are not included. | ||||
| // The root locale is always sorted first. | ||||
| func (cldr *CLDR) Locales() []string { | ||||
| 	loc := []string{"root"} | ||||
| 	hasRoot := false | ||||
| 	for l, _ := range cldr.locale { | ||||
| 		if l == "root" { | ||||
| 			hasRoot = true | ||||
| 			continue | ||||
| 		} | ||||
| 		loc = append(loc, l) | ||||
| 	} | ||||
| 	sort.Strings(loc[1:]) | ||||
| 	if !hasRoot { | ||||
| 		return loc[1:] | ||||
| 	} | ||||
| 	return loc | ||||
| } | ||||
|  | ||||
// Get returns the element reached from e by following the given XPath-like
// path, delegating to walkXPath (defined elsewhere in this package).
func Get(e Elem, path string) (res Elem, err error) {
	return walkXPath(e, path)
}
							
								
								
									
										359
									
								
								vendor/golang.org/x/text/unicode/cldr/collate.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										359
									
								
								vendor/golang.org/x/text/unicode/cldr/collate.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,359 @@ | ||||
| // Copyright 2013 The Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| package cldr | ||||
|  | ||||
| import ( | ||||
| 	"bufio" | ||||
| 	"encoding/xml" | ||||
| 	"errors" | ||||
| 	"fmt" | ||||
| 	"strconv" | ||||
| 	"strings" | ||||
| 	"unicode" | ||||
| 	"unicode/utf8" | ||||
| ) | ||||
|  | ||||
// RuleProcessor can be passed to Collator's Process method, which
// parses the rules and calls the respective method for each rule found.
type RuleProcessor interface {
	// Reset moves the insertion point to the given anchor; before, when
	// non-zero, is the strength level of a "before" reset.
	Reset(anchor string, before int) error
	// Insert adds str at the given strength level relative to the current
	// position, with optional context and extension strings.
	Insert(level int, str, context, extend string) error
	// Index marks an index boundary with the given identifier.
	Index(id string)
}
|  | ||||
const (
	// cldrIndex is a Unicode-reserved sentinel value used to mark the start
	// of a grouping within an index.
	// We ignore any rule that starts with this rune.
	// See http://unicode.org/reports/tr35/#Collation_Elements for details.
	cldrIndex = "\uFDD0"

	// specialAnchor is the format in which to represent logical reset
	// positions, such as "first tertiary ignorable"; the %s verb is filled
	// with the position's name.
	specialAnchor = "<%s/>"
)
|  | ||||
| // Process parses the rules for the tailorings of this collation | ||||
| // and calls the respective methods of p for each rule found. | ||||
| func (c Collation) Process(p RuleProcessor) (err error) { | ||||
| 	if len(c.Cr) > 0 { | ||||
| 		if len(c.Cr) > 1 { | ||||
| 			return fmt.Errorf("multiple cr elements, want 0 or 1") | ||||
| 		} | ||||
| 		return processRules(p, c.Cr[0].Data()) | ||||
| 	} | ||||
| 	if c.Rules.Any != nil { | ||||
| 		return c.processXML(p) | ||||
| 	} | ||||
| 	return errors.New("no tailoring data") | ||||
| } | ||||
|  | ||||
// processRules parses rules in the Collation Rule Syntax defined in
// http://www.unicode.org/reports/tr35/tr35-collation.html#Collation_Tailorings.
// Input is processed line by line; '#' starts a comment running to the end
// of the line. On failure the returned error is prefixed with the (0-based)
// line number at which parsing stopped.
func processRules(p RuleProcessor, s string) (err error) {
	// chk records the first error encountered and passes the tail string
	// through, so the parsing helpers can be chained without per-call
	// error checks.
	chk := func(s string, e error) string {
		if err == nil {
			err = e
		}
		return s
	}
	i := 0 // Save the line number for use after the loop.
	scanner := bufio.NewScanner(strings.NewReader(s))
	for ; scanner.Scan() && err == nil; i++ {
		for s := skipSpace(scanner.Text()); s != "" && s[0] != '#'; s = skipSpace(s) {
			level := 5 // identity level; lowered below for '<' relations
			var ch byte
			switch ch, s = s[0], s[1:]; ch {
			case '&': // followed by <anchor> or '[' <key> ']'
				if s = skipSpace(s); consume(&s, '[') {
					s = chk(parseSpecialAnchor(p, s))
				} else {
					s = chk(parseAnchor(p, 0, s))
				}
			case '<': // sort relation '<'{1,4}, optionally followed by '*'.
				for level = 1; consume(&s, '<'); level++ {
				}
				if level > 4 {
					err = fmt.Errorf("level %d > 4", level)
				}
				fallthrough
			case '=': // identity relation, optionally followed by *.
				if consume(&s, '*') {
					s = chk(parseSequence(p, level, s))
				} else {
					s = chk(parseOrder(p, level, s))
				}
			default:
				chk("", fmt.Errorf("illegal operator %q", ch))
				// NOTE(review): this break exits only the switch (a no-op);
				// the recorded error stops the outer loop at the next line.
				break
			}
		}
	}
	if chk("", scanner.Err()); err != nil {
		return fmt.Errorf("%d: %v", i, err)
	}
	return nil
}
|  | ||||
// parseSpecialAnchor parses the anchor syntax which is either of the form
//    ['before' <level>] <anchor>
// or
//    [<label>]
// The opening '[' should already be consumed by the caller.
func parseSpecialAnchor(p RuleProcessor, s string) (tail string, err error) {
	i := strings.IndexByte(s, ']')
	if i == -1 {
		return "", errors.New("unmatched bracket")
	}
	a := strings.TrimSpace(s[:i])
	s = s[i+1:]
	if strings.HasPrefix(a, "before ") {
		// "before N" form: parse the level (at most 3 bits, i.e. 0-7) and
		// treat the remainder of the line as a regular anchor.
		l, err := strconv.ParseUint(skipSpace(a[len("before "):]), 10, 3)
		if err != nil {
			return s, err
		}
		return parseAnchor(p, int(l), s)
	}
	// Logical position label, e.g. [first tertiary ignorable], reported in
	// the specialAnchor element form.
	return s, p.Reset(fmt.Sprintf(specialAnchor, a), 0)
}
|  | ||||
// parseAnchor scans the anchor string following '&' and reports it to p via
// Reset with the given before-level (0 for a plain reset).
func parseAnchor(p RuleProcessor, level int, s string) (tail string, err error) {
	anchor, s, err := scanString(s)
	if err != nil {
		return s, err
	}
	return s, p.Reset(anchor, level)
}
|  | ||||
// parseOrder parses a single ordering rule following a relation operator:
// a value, an optional '|' context, and an optional '/' extension, and
// reports them to p at the given strength level.
func parseOrder(p RuleProcessor, level int, s string) (tail string, err error) {
	var value, context, extend string
	if value, s, err = scanString(s); err != nil {
		return s, err
	}
	if strings.HasPrefix(value, cldrIndex) {
		p.Index(value[len(cldrIndex):])
		// NOTE(review): this bare return yields tail == "", discarding the
		// remainder of the line after an index marker — presumably
		// intentional; confirm against the TR35 index syntax.
		return
	}
	if consume(&s, '|') {
		if context, s, err = scanString(s); err != nil {
			return s, errors.New("missing string after context")
		}
	}
	if consume(&s, '/') {
		if extend, s, err = scanString(s); err != nil {
			return s, errors.New("missing string after extension")
		}
	}
	return s, p.Insert(level, value, context, extend)
}
|  | ||||
| // scanString scans a single input string. | ||||
| func scanString(s string) (str, tail string, err error) { | ||||
| 	if s = skipSpace(s); s == "" { | ||||
| 		return s, s, errors.New("missing string") | ||||
| 	} | ||||
| 	buf := [16]byte{} // small but enough to hold most cases. | ||||
| 	value := buf[:0] | ||||
| 	for s != "" { | ||||
| 		if consume(&s, '\'') { | ||||
| 			i := strings.IndexByte(s, '\'') | ||||
| 			if i == -1 { | ||||
| 				return "", "", errors.New(`unmatched single quote`) | ||||
| 			} | ||||
| 			if i == 0 { | ||||
| 				value = append(value, '\'') | ||||
| 			} else { | ||||
| 				value = append(value, s[:i]...) | ||||
| 			} | ||||
| 			s = s[i+1:] | ||||
| 			continue | ||||
| 		} | ||||
| 		r, sz := utf8.DecodeRuneInString(s) | ||||
| 		if unicode.IsSpace(r) || strings.ContainsRune("&<=#", r) { | ||||
| 			break | ||||
| 		} | ||||
| 		value = append(value, s[:sz]...) | ||||
| 		s = s[sz:] | ||||
| 	} | ||||
| 	return string(value), skipSpace(s), nil | ||||
| } | ||||
|  | ||||
// parseSequence parses the abbreviated sequence syntax following '*': each
// rune is inserted individually at the given strength level, and 'a-c'
// denotes the inclusive range between two runes. Parsing stops at the
// first space or punctuation character.
func parseSequence(p RuleProcessor, level int, s string) (tail string, err error) {
	if s = skipSpace(s); s == "" {
		return s, errors.New("empty sequence")
	}
	last := rune(0)
	for s != "" {
		r, sz := utf8.DecodeRuneInString(s)
		s = s[sz:]

		if r == '-' {
			// We have a range. The first element was already written.
			if last == 0 {
				return s, errors.New("range without starter value")
			}
			r, sz = utf8.DecodeRuneInString(s)
			s = s[sz:]
			if r == utf8.RuneError || r < last {
				return s, fmt.Errorf("invalid range %q-%q", last, r)
			}
			for i := last + 1; i <= r; i++ {
				// i is a rune, so string(i) is the single character.
				if err := p.Insert(level, string(i), "", ""); err != nil {
					return s, err
				}
			}
			// A range end cannot start another range.
			last = 0
			continue
		}

		if unicode.IsSpace(r) || unicode.IsPunct(r) {
			break
		}

		// normal case
		if err := p.Insert(level, string(r), "", ""); err != nil {
			return s, err
		}
		last = r
	}
	return s, nil
}
|  | ||||
// skipSpace returns s with all leading Unicode white space removed.
func skipSpace(s string) string {
	i := strings.IndexFunc(s, func(r rune) bool { return !unicode.IsSpace(r) })
	if i < 0 {
		return ""
	}
	return s[i:]
}
|  | ||||
// consume reports whether the next byte of *s is ch and, if so, gobbles it
// by advancing *s past it.
func consume(s *string, ch byte) (ok bool) {
	str := *s
	if len(str) == 0 || str[0] != ch {
		return false
	}
	*s = str[1:]
	return true
}
|  | ||||
// The following code parses Collation rules of CLDR version 24 and before.

// lmap maps the leading letter of a rule element name ("p", "s", "t", "i"
// and their "...c" variants) to the strength level passed to
// RuleProcessor.Insert; the identity relation is represented as level 5.
var lmap = map[byte]int{
	'p': 1,
	's': 2,
	't': 3,
	'i': 5,
}
|  | ||||
// rulesElem holds the <rules> element of a pre-CLDR-24 collation: a list of
// arbitrarily named rule elements, kept with their XML names for dispatch.
type rulesElem struct {
	Rules struct {
		Common
		Any []*struct {
			XMLName xml.Name
			rule
		} `xml:",any"`
	} `xml:"rules"`
}

// rule is a single tailoring rule: character data plus an optional "before"
// attribute and, for wrapper elements such as <x>, nested child rules.
type rule struct {
	Value  string `xml:",chardata"`
	Before string `xml:"before,attr"`
	Any    []*struct {
		XMLName xml.Name
		rule
	} `xml:",any"`
}
|  | ||||
// emptyValueError is returned by rule.value for a rule that has neither
// character data nor exactly one child element.
// NOTE(review): Go convention would name this errEmptyValue; left unchanged
// in case it is referenced outside this chunk.
var emptyValueError = errors.New("cldr: empty rule value")
|  | ||||
// value returns the rule's text content with Unicode escapes expanded,
// caching the result in r.Value. A rule with no character data must contain
// exactly one child element, which is translated into the specialAnchor
// form (e.g. "<first_tertiary_ignorable/>"); mixing character data and
// child elements is an error.
func (r *rule) value() (string, error) {
	// Convert hexadecimal Unicode codepoint notation to a string.
	s := charRe.ReplaceAllStringFunc(r.Value, replaceUnicode)
	r.Value = s
	if s == "" {
		if len(r.Any) != 1 {
			return "", emptyValueError
		}
		r.Value = fmt.Sprintf(specialAnchor, r.Any[0].XMLName.Local)
		r.Any = nil
	} else if len(r.Any) != 0 {
		return "", fmt.Errorf("cldr: XML elements found in collation rule: %v", r.Any)
	}
	return r.Value, nil
}
|  | ||||
// process reports the rule to p. name is the XML tag of the rule element:
// "p"/"s"/"t"/"i" insert the whole value at the corresponding strength
// level (see lmap); the "pc"/"sc"/"tc"/"ic" variants insert each rune of
// the value individually at that level.
func (r rule) process(p RuleProcessor, name, context, extend string) error {
	v, err := r.value()
	if err != nil {
		return err
	}
	switch name {
	case "p", "s", "t", "i":
		if strings.HasPrefix(v, cldrIndex) {
			// Index marker: report the index id instead of inserting.
			p.Index(v[len(cldrIndex):])
			return nil
		}
		if err := p.Insert(lmap[name[0]], v, context, extend); err != nil {
			return err
		}
	case "pc", "sc", "tc", "ic":
		// Multi-character shorthand: one Insert per rune.
		level := lmap[name[0]]
		for _, s := range v {
			if err := p.Insert(level, string(s), context, extend); err != nil {
				return err
			}
		}
	default:
		return fmt.Errorf("cldr: unsupported tag: %q", name)
	}
	return nil
}
|  | ||||
| // processXML parses the format of CLDR versions 24 and older. | ||||
| func (c Collation) processXML(p RuleProcessor) (err error) { | ||||
| 	// Collation is generated and defined in xml.go. | ||||
| 	var v string | ||||
| 	for _, r := range c.Rules.Any { | ||||
| 		switch r.XMLName.Local { | ||||
| 		case "reset": | ||||
| 			level := 0 | ||||
| 			switch r.Before { | ||||
| 			case "primary", "1": | ||||
| 				level = 1 | ||||
| 			case "secondary", "2": | ||||
| 				level = 2 | ||||
| 			case "tertiary", "3": | ||||
| 				level = 3 | ||||
| 			case "": | ||||
| 			default: | ||||
| 				return fmt.Errorf("cldr: unknown level %q", r.Before) | ||||
| 			} | ||||
| 			v, err = r.value() | ||||
| 			if err == nil { | ||||
| 				err = p.Reset(v, level) | ||||
| 			} | ||||
| 		case "x": | ||||
| 			var context, extend string | ||||
| 			for _, r1 := range r.Any { | ||||
| 				v, err = r1.value() | ||||
| 				switch r1.XMLName.Local { | ||||
| 				case "context": | ||||
| 					context = v | ||||
| 				case "extend": | ||||
| 					extend = v | ||||
| 				} | ||||
| 			} | ||||
| 			for _, r1 := range r.Any { | ||||
| 				if t := r1.XMLName.Local; t == "context" || t == "extend" { | ||||
| 					continue | ||||
| 				} | ||||
| 				r1.rule.process(p, r1.XMLName.Local, context, extend) | ||||
| 			} | ||||
| 		default: | ||||
| 			err = r.rule.process(p, r.XMLName.Local, "", "") | ||||
| 		} | ||||
| 		if err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
							
								
								
									
										171
									
								
								vendor/golang.org/x/text/unicode/cldr/decode.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										171
									
								
								vendor/golang.org/x/text/unicode/cldr/decode.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,171 @@ | ||||
| // Copyright 2013 The Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| package cldr | ||||
|  | ||||
| import ( | ||||
| 	"archive/zip" | ||||
| 	"bytes" | ||||
| 	"encoding/xml" | ||||
| 	"fmt" | ||||
| 	"io" | ||||
| 	"io/ioutil" | ||||
| 	"log" | ||||
| 	"os" | ||||
| 	"path/filepath" | ||||
| 	"regexp" | ||||
| ) | ||||
|  | ||||
// A Decoder loads an archive of CLDR data.
type Decoder struct {
	dirFilter     []string // directories to load; empty means all (SetDirFilter)
	sectionFilter []string // top-level LDML sections to evaluate; empty means all (SetSectionFilter)
	loader        Loader   // not assigned in this chunk — presumably set elsewhere; confirm
	cldr          *CLDR    // the result under construction by Decode
	curLocale     string
}
|  | ||||
// SetSectionFilter takes a list of top-level LDML element names to which
// evaluation of LDML should be limited.
// NOTE(review): contrary to the original comment, it does not (yet) set the
// directory filter automatically; see the TODO below.
func (d *Decoder) SetSectionFilter(filter ...string) {
	d.sectionFilter = filter
	// TODO: automatically set dir filter
}
|  | ||||
// SetDirFilter limits the loading of LDML XML files to the specified directories.
// Note that sections may be split across directories differently for different CLDR versions.
// For more robust code, use SetSectionFilter.
func (d *Decoder) SetDirFilter(dir ...string) {
	d.dirFilter = dir
}
|  | ||||
// A Loader provides access to the files of a CLDR archive.
type Loader interface {
	// Len returns the number of files in the archive.
	Len() int
	// Path returns the full path of the i'th file.
	Path(i int) string
	// Reader opens the i'th file; the caller is responsible for closing it.
	Reader(i int) (io.ReadCloser, error)
}
|  | ||||
// fileRe matches paths of the form .../dir/name.xml; capture 1 is the
// immediately containing directory and capture 2 the base name without
// the .xml extension.
var fileRe = regexp.MustCompile(`.*/(.*)/(.*)\.xml`)
|  | ||||
| // Decode loads and decodes the files represented by l. | ||||
| func (d *Decoder) Decode(l Loader) (cldr *CLDR, err error) { | ||||
| 	d.cldr = makeCLDR() | ||||
| 	for i := 0; i < l.Len(); i++ { | ||||
| 		fname := l.Path(i) | ||||
| 		if m := fileRe.FindStringSubmatch(fname); m != nil { | ||||
| 			if len(d.dirFilter) > 0 && !in(d.dirFilter, m[1]) { | ||||
| 				continue | ||||
| 			} | ||||
| 			var r io.Reader | ||||
| 			if r, err = l.Reader(i); err == nil { | ||||
| 				err = d.decode(m[1], m[2], r) | ||||
| 			} | ||||
| 			if err != nil { | ||||
| 				return nil, err | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 	d.cldr.finalize(d.sectionFilter) | ||||
| 	return d.cldr, nil | ||||
| } | ||||
|  | ||||
// decode parses one XML file into the appropriate part of the CLDR under
// construction. dir is the directory the file came from and id its base
// name without extension (the locale identifier for locale files).
func (d *Decoder) decode(dir, id string, r io.Reader) error {
	var v interface{}
	var l *LDML
	cldr := d.cldr
	switch {
	case dir == "supplemental":
		v = cldr.supp
	case dir == "transforms":
		return nil // transform files are skipped
	case dir == "bcp47":
		v = cldr.bcp47
	case dir == "validity":
		return nil // validity files are skipped
	default:
		// A locale file: reuse the LDML already recorded for this id, or
		// start a new one.
		ok := false
		if v, ok = cldr.locale[id]; !ok {
			l = &LDML{}
			v, cldr.locale[id] = l, l
		}
	}
	x := xml.NewDecoder(r)
	if err := x.Decode(v); err != nil {
		log.Printf("%s/%s: %v", dir, id, err)
		return err
	}
	if l != nil {
		// Newly created locale entries must carry an identity element.
		if l.Identity == nil {
			return fmt.Errorf("%s/%s: missing identity element", dir, id)
		}
		// TODO: verify when CLDR bug http://unicode.org/cldr/trac/ticket/8970
		// is resolved.
		// path := strings.Split(id, "_")
		// if lang := l.Identity.Language.Type; lang != path[0] {
		// 	return fmt.Errorf("%s/%s: language was %s; want %s", dir, id, lang, path[0])
		// }
	}
	return nil
}
|  | ||||
// pathLoader implements Loader over a list of file-system paths.
type pathLoader []string

// makePathLoader collects every path found under the given root (including
// directories; non-XML paths are filtered out later by fileRe in Decode).
// A walk error aborts the walk and is returned.
func makePathLoader(path string) (pl pathLoader, err error) {
	err = filepath.Walk(path, func(path string, _ os.FileInfo, err error) error {
		pl = append(pl, path)
		return err
	})
	return pl, err
}
|  | ||||
// Len returns the number of collected paths.
func (pl pathLoader) Len() int {
	return len(pl)
}

// Path returns the i'th collected path.
func (pl pathLoader) Path(i int) string {
	return pl[i]
}

// Reader opens the i'th file for reading.
func (pl pathLoader) Reader(i int) (io.ReadCloser, error) {
	return os.Open(pl[i])
}
|  | ||||
// DecodePath loads CLDR data from the given file-system path, which may
// name a directory tree of XML files.
func (d *Decoder) DecodePath(path string) (cldr *CLDR, err error) {
	loader, err := makePathLoader(path)
	if err != nil {
		return nil, err
	}
	return d.Decode(loader)
}
|  | ||||
// zipLoader implements Loader over the contents of a zip archive.
type zipLoader struct {
	r *zip.Reader
}

// Len returns the number of files in the archive.
func (zl zipLoader) Len() int {
	return len(zl.r.File)
}

// Path returns the archive-internal name of the i'th file.
func (zl zipLoader) Path(i int) string {
	return zl.r.File[i].Name
}

// Reader opens the i'th file in the archive.
func (zl zipLoader) Reader(i int) (io.ReadCloser, error) {
	return zl.r.File[i].Open()
}
|  | ||||
// DecodeZip loads CLDR data from the zip archive for which r is the source.
// The entire archive is buffered in memory first, because zip.NewReader
// requires an io.ReaderAt with a known size.
func (d *Decoder) DecodeZip(r io.Reader) (cldr *CLDR, err error) {
	buffer, err := ioutil.ReadAll(r)
	if err != nil {
		return nil, err
	}
	archive, err := zip.NewReader(bytes.NewReader(buffer), int64(len(buffer)))
	if err != nil {
		return nil, err
	}
	return d.Decode(zipLoader{archive})
}
							
								
								
									
										400
									
								
								vendor/golang.org/x/text/unicode/cldr/makexml.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										400
									
								
								vendor/golang.org/x/text/unicode/cldr/makexml.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,400 @@ | ||||
| // Copyright 2013 The Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| // +build ignore | ||||
|  | ||||
| // This tool generates types for the various XML formats of CLDR. | ||||
| package main | ||||
|  | ||||
| import ( | ||||
| 	"archive/zip" | ||||
| 	"bytes" | ||||
| 	"encoding/xml" | ||||
| 	"flag" | ||||
| 	"fmt" | ||||
| 	"io" | ||||
| 	"io/ioutil" | ||||
| 	"log" | ||||
| 	"os" | ||||
| 	"regexp" | ||||
| 	"strings" | ||||
|  | ||||
| 	"golang.org/x/text/internal/gen" | ||||
| ) | ||||
|  | ||||
// outputFile names the generated Go file; matches the go:generate directive
// in cldr.go ("go run makexml.go -output xml.go").
var outputFile = flag.String("output", "xml.go", "output file name")
|  | ||||
| func main() { | ||||
| 	flag.Parse() | ||||
|  | ||||
| 	r := gen.OpenCLDRCoreZip() | ||||
| 	buffer, err := ioutil.ReadAll(r) | ||||
| 	if err != nil { | ||||
| 		log.Fatal("Could not read zip file") | ||||
| 	} | ||||
| 	r.Close() | ||||
| 	z, err := zip.NewReader(bytes.NewReader(buffer), int64(len(buffer))) | ||||
| 	if err != nil { | ||||
| 		log.Fatalf("Could not read zip archive: %v", err) | ||||
| 	} | ||||
|  | ||||
| 	var buf bytes.Buffer | ||||
|  | ||||
| 	version := gen.CLDRVersion() | ||||
|  | ||||
| 	for _, dtd := range files { | ||||
| 		for _, f := range z.File { | ||||
| 			if strings.HasSuffix(f.Name, dtd.file+".dtd") { | ||||
| 				r, err := f.Open() | ||||
| 				failOnError(err) | ||||
|  | ||||
| 				b := makeBuilder(&buf, dtd) | ||||
| 				b.parseDTD(r) | ||||
| 				b.resolve(b.index[dtd.top[0]]) | ||||
| 				b.write() | ||||
| 				if b.version != "" && version != b.version { | ||||
| 					println(f.Name) | ||||
| 					log.Fatalf("main: inconsistent versions: found %s; want %s", b.version, version) | ||||
| 				} | ||||
| 				break | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 	fmt.Fprintln(&buf, "// Version is the version of CLDR from which the XML definitions are generated.") | ||||
| 	fmt.Fprintf(&buf, "const Version = %q\n", version) | ||||
|  | ||||
| 	gen.WriteGoFile(*outputFile, "cldr", buf.Bytes()) | ||||
| } | ||||
|  | ||||
// failOnError prints err to stderr with the caller's file:line and exits
// with status 1; it is a no-op for a nil error.
func failOnError(err error) {
	if err == nil {
		return
	}
	log.New(os.Stderr, "", log.Lshortfile).Output(2, err.Error())
	os.Exit(1)
}
|  | ||||
// dtd holds the per-DTD configuration that controls how Go types are
// generated from one CLDR DTD file.
type dtd struct {
	file string   // base file name
	root string   // Go name of the root XML element
	top  []string // create a different type for this section

	skipElem    []string // hard-coded or deprecated elements
	skipAttr    []string // attributes to exclude
	predefined  []string // hard-coded elements exist of the form <name>Elem
	forceRepeat []string // elements to make slices despite DTD
}
|  | ||||
// files configures, per DTD shipped in the CLDR archive, how the generated
// Go types are derived; see the dtd struct for field meanings.
var files = []dtd{
	{
		file: "ldmlBCP47",
		root: "LDMLBCP47",
		top:  []string{"ldmlBCP47"},
		skipElem: []string{
			"cldrVersion", // deprecated, not used
		},
	},
	{
		file: "ldmlSupplemental",
		root: "SupplementalData",
		top:  []string{"supplementalData"},
		skipElem: []string{
			"cldrVersion", // deprecated, not used
		},
		forceRepeat: []string{
			"plurals", // data defined in plurals.xml and ordinals.xml
		},
	},
	{
		file: "ldml",
		root: "LDML",
		top: []string{
			"ldml", "collation", "calendar", "timeZoneNames", "localeDisplayNames", "numbers",
		},
		skipElem: []string{
			"cp",       // not used anywhere
			"special",  // not used anywhere
			"fallback", // deprecated, not used
			"alias",    // in Common
			"default",  // in Common
		},
		skipAttr: []string{
			"hiraganaQuarternary", // typo in DTD, correct version included as well
		},
		predefined: []string{"rules"},
	},
}
|  | ||||
// comments maps a top-level type name to the doc comment emitted above the
// corresponding generated Go type. The string contents are written to the
// generated file verbatim and are deliberately left unchanged here.
var comments = map[string]string{
	"ldmlBCP47": `
// LDMLBCP47 holds information on allowable values for various variables in LDML.
`,
	"supplementalData": `
// SupplementalData holds information relevant for internationalization
// and proper use of CLDR, but that is not contained in the locale hierarchy.
`,
	"ldml": `
// LDML is the top-level type for locale-specific data.
`,
	"collation": `
// Collation contains rules that specify a certain sort-order,
// as a tailoring of the root order.  
// The parsed rules are obtained by passing a RuleProcessor to Collation's
// Process method.
`,
	"calendar": `
// Calendar specifies the fields used for formatting and parsing dates and times.
// The month and quarter names are identified numerically, starting at 1.
// The day (of the week) names are identified with short strings, since there is
// no universally-accepted numeric designation.
`,
	"dates": `
// Dates contains information regarding the format and parsing of dates and times.
`,
	"localeDisplayNames": `
// LocaleDisplayNames specifies localized display names for for scripts, languages,
// countries, currencies, and variants.
`,
	"numbers": `
// Numbers supplies information for formatting and parsing numbers and currencies.
`,
}
|  | ||||
// element describes one XML element declared in a DTD, carrying what is
// needed to emit a Go struct type for it.
type element struct {
	name      string // XML element name
	category  string // elements contained by this element
	signature string // category + attrKey*

	attr []*attribute // attributes supported by this element.
	sub  []struct {   // parsed and evaluated sub elements of this element.
		e      *element
		repeat bool // true if the element needs to be a slice
	}

	resolved bool // prevent multiple resolutions of this element.
}
|  | ||||
| type attribute struct { | ||||
| 	name string | ||||
| 	key  string | ||||
| 	list []string | ||||
|  | ||||
| 	tag string // Go tag | ||||
| } | ||||
|  | ||||
| var ( | ||||
| 	reHead  = regexp.MustCompile(` *(\w+) +([\w\-]+)`) | ||||
| 	reAttr  = regexp.MustCompile(` *(\w+) *(?:(\w+)|\(([\w\- \|]+)\)) *(?:#([A-Z]*) *(?:\"([\.\d+])\")?)? *("[\w\-:]*")?`) | ||||
| 	reElem  = regexp.MustCompile(`^ *(EMPTY|ANY|\(.*\)[\*\+\?]?) *$`) | ||||
| 	reToken = regexp.MustCompile(`\w\-`) | ||||
| ) | ||||
|  | ||||
// builder is used to read in the DTD files from CLDR and generate Go code
// to be used with the encoding/xml package.
type builder struct {
	w       io.Writer           // destination for the generated Go code
	index   map[string]*element // parsed elements, keyed by XML name
	elem    []*element          // elements in the order they were resolved
	info    dtd                 // per-DTD configuration
	version string              // DTD version, taken from a #FIXED attribute
}

// makeBuilder returns a builder that writes code generated from DTD d to w.
func makeBuilder(w io.Writer, d dtd) builder {
	return builder{
		w:     w,
		index: make(map[string]*element),
		elem:  []*element{},
		info:  d,
	}
}
|  | ||||
| // parseDTD parses a DTD file. | ||||
| func (b *builder) parseDTD(r io.Reader) { | ||||
| 	for d := xml.NewDecoder(r); ; { | ||||
| 		t, err := d.Token() | ||||
| 		if t == nil { | ||||
| 			break | ||||
| 		} | ||||
| 		failOnError(err) | ||||
| 		dir, ok := t.(xml.Directive) | ||||
| 		if !ok { | ||||
| 			continue | ||||
| 		} | ||||
| 		m := reHead.FindSubmatch(dir) | ||||
| 		dir = dir[len(m[0]):] | ||||
| 		ename := string(m[2]) | ||||
| 		el, elementFound := b.index[ename] | ||||
| 		switch string(m[1]) { | ||||
| 		case "ELEMENT": | ||||
| 			if elementFound { | ||||
| 				log.Fatal("parseDTD: duplicate entry for element %q", ename) | ||||
| 			} | ||||
| 			m := reElem.FindSubmatch(dir) | ||||
| 			if m == nil { | ||||
| 				log.Fatalf("parseDTD: invalid element %q", string(dir)) | ||||
| 			} | ||||
| 			if len(m[0]) != len(dir) { | ||||
| 				log.Fatal("parseDTD: invalid element %q", string(dir), len(dir), len(m[0]), string(m[0])) | ||||
| 			} | ||||
| 			s := string(m[1]) | ||||
| 			el = &element{ | ||||
| 				name:     ename, | ||||
| 				category: s, | ||||
| 			} | ||||
| 			b.index[ename] = el | ||||
| 		case "ATTLIST": | ||||
| 			if !elementFound { | ||||
| 				log.Fatalf("parseDTD: unknown element %q", ename) | ||||
| 			} | ||||
| 			s := string(dir) | ||||
| 			m := reAttr.FindStringSubmatch(s) | ||||
| 			if m == nil { | ||||
| 				log.Fatal(fmt.Errorf("parseDTD: invalid attribute %q", string(dir))) | ||||
| 			} | ||||
| 			if m[4] == "FIXED" { | ||||
| 				b.version = m[5] | ||||
| 			} else { | ||||
| 				switch m[1] { | ||||
| 				case "draft", "references", "alt", "validSubLocales", "standard" /* in Common */ : | ||||
| 				case "type", "choice": | ||||
| 				default: | ||||
| 					el.attr = append(el.attr, &attribute{ | ||||
| 						name: m[1], | ||||
| 						key:  s, | ||||
| 						list: reToken.FindAllString(m[3], -1), | ||||
| 					}) | ||||
| 					el.signature = fmt.Sprintf("%s=%s+%s", el.signature, m[1], m[2]) | ||||
| 				} | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
|  | ||||
// reCat matches one component of an element's content model: an opening or
// closing parenthesis or a (possibly #-prefixed) name, each with an
// optional repetition marker (*, + or ?), skipping separators.
var reCat = regexp.MustCompile(`[ ,\|]*(?:(\(|\)|\#?[\w_-]+)([\*\+\?]?))?`)

// resolve takes a parsed element and converts it into structured data
// that can be used to generate the XML code.
func (b *builder) resolve(e *element) {
	if e.resolved {
		return
	}
	b.elem = append(b.elem, e)
	e.resolved = true
	s := e.category
	found := make(map[string]bool)
	sequenceStart := []int{}
	for len(s) > 0 {
		m := reCat.FindStringSubmatch(s)
		if m == nil {
			log.Fatalf("%s: invalid category string %q", e.name, s)
		}
		repeat := m[2] == "*" || m[2] == "+" || in(b.info.forceRepeat, m[1])
		switch m[1] {
		case "":
		case "(":
			// Remember where this group starts so a repetition marker on
			// the closing parenthesis can apply to the whole group.
			sequenceStart = append(sequenceStart, len(e.sub))
		case ")":
			if len(sequenceStart) == 0 {
				log.Fatalf("%s: unmatched closing parenthesis", e.name)
			}
			// Propagate the group's repetition to every element in it.
			for i := sequenceStart[len(sequenceStart)-1]; i < len(e.sub); i++ {
				e.sub[i].repeat = e.sub[i].repeat || repeat
			}
			sequenceStart = sequenceStart[:len(sequenceStart)-1]
		default:
			if in(b.info.skipElem, m[1]) {
				// Explicitly skipped element: ignore.
			} else if sub, ok := b.index[m[1]]; ok {
				// Record each distinct sub-element once and resolve it
				// recursively.
				if !found[sub.name] {
					e.sub = append(e.sub, struct {
						e      *element
						repeat bool
					}{sub, repeat})
					found[sub.name] = true
					b.resolve(sub)
				}
			} else if m[1] == "#PCDATA" || m[1] == "ANY" {
				// Text or unconstrained content: nothing to generate.
			} else if m[1] != "EMPTY" {
				log.Fatalf("resolve:%s: element %q not found", e.name, m[1])
			}
		}
		s = s[len(m[0]):]
	}
}
|  | ||||
// in reports whether s is one of the strings in set.
func in(set []string, s string) bool {
	for _, candidate := range set {
		if candidate == s {
			return true
		}
	}
	return false
}
|  | ||||
// repl maps the word separators '-' and '_' to spaces so that
// strings.Title can capitalize each word.
var repl = strings.NewReplacer("-", " ", "_", " ")

// title converts s to CamelCase: each '-'- or '_'-separated word is
// title-cased and the separators are removed.
func title(s string) string {
	spaced := repl.Replace(s)
	titled := strings.Title(spaced)
	return strings.Replace(titled, " ", "", -1)
}
|  | ||||
// writeElem generates Go code for a single element, recursively.
// tab is the current indentation depth of the emitted code.
func (b *builder) writeElem(tab int, e *element) {
	// p prints with the current indentation inserted after each newline.
	p := func(f string, x ...interface{}) {
		f = strings.Replace(f, "\n", "\n"+strings.Repeat("\t", tab), -1)
		fmt.Fprintf(b.w, f, x...)
	}
	// Elements with no attributes or sub-elements collapse to just Common.
	if len(e.sub) == 0 && len(e.attr) == 0 {
		p("Common")
		return
	}
	p("struct {")
	tab++
	p("\nCommon")
	// Emit one string field per attribute that is not skipped.
	for _, attr := range e.attr {
		if !in(b.info.skipAttr, attr.name) {
			p("\n%s string `xml:\"%s,attr\"`", title(attr.name), attr.name)
		}
	}
	for _, sub := range e.sub {
		if in(b.info.predefined, sub.e.name) {
			// Predefined elements refer to a hand-written <name>Elem type.
			p("\n%sElem", sub.e.name)
			continue
		}
		if in(b.info.skipElem, sub.e.name) {
			continue
		}
		p("\n%s ", title(sub.e.name))
		// Repeated sub-elements become slices.
		if sub.repeat {
			p("[]")
		}
		p("*")
		if in(b.info.top, sub.e.name) {
			// Top-level elements get a named type; refer to it by name.
			p(title(sub.e.name))
		} else {
			// Otherwise inline the struct definition recursively.
			b.writeElem(tab, sub.e)
		}
		p(" `xml:\"%s\"`", sub.e.name)
	}
	tab--
	p("\n}")
}
|  | ||||
| // write generates the Go XML code. | ||||
| func (b *builder) write() { | ||||
| 	for i, name := range b.info.top { | ||||
| 		e := b.index[name] | ||||
| 		if e != nil { | ||||
| 			fmt.Fprintf(b.w, comments[name]) | ||||
| 			name := title(e.name) | ||||
| 			if i == 0 { | ||||
| 				name = b.info.root | ||||
| 			} | ||||
| 			fmt.Fprintf(b.w, "type %s ", name) | ||||
| 			b.writeElem(0, e) | ||||
| 			fmt.Fprint(b.w, "\n") | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
							
								
								
									
										602
									
								
								vendor/golang.org/x/text/unicode/cldr/resolve.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										602
									
								
								vendor/golang.org/x/text/unicode/cldr/resolve.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,602 @@ | ||||
| // Copyright 2013 The Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| package cldr | ||||
|  | ||||
| // This file implements the various inheritance constructs defined by LDML. | ||||
| // See http://www.unicode.org/reports/tr35/#Inheritance_and_Validity | ||||
| // for more details. | ||||
|  | ||||
| import ( | ||||
| 	"fmt" | ||||
| 	"log" | ||||
| 	"reflect" | ||||
| 	"regexp" | ||||
| 	"sort" | ||||
| 	"strings" | ||||
| ) | ||||
|  | ||||
// fieldIter iterates over fields in a struct. It includes
// fields of embedded structs.
type fieldIter struct {
	v        reflect.Value
	index, n []int // current field index path, and NumField per nesting level
}

// iter returns a fieldIter positioned at the first (leaf) field of
// struct v. It panics if v is not a struct.
func iter(v reflect.Value) fieldIter {
	if v.Kind() != reflect.Struct {
		log.Panicf("value %v must be a struct", v)
	}
	i := fieldIter{
		v:     v,
		index: []int{0},
		n:     []int{v.NumField()},
	}
	i.descent()
	return i
}

// descent pushes into anonymous (embedded) struct fields so the iterator
// always points at a leaf field.
func (i *fieldIter) descent() {
	for f := i.field(); f.Anonymous && f.Type.NumField() > 0; f = i.field() {
		i.index = append(i.index, 0)
		i.n = append(i.n, f.Type.NumField())
	}
}

// done reports whether the iterator has moved past the last field.
func (i *fieldIter) done() bool {
	return len(i.index) == 1 && i.index[0] >= i.n[0]
}
|  | ||||
| func skip(f reflect.StructField) bool { | ||||
| 	return !f.Anonymous && (f.Name[0] < 'A' || f.Name[0] > 'Z') | ||||
| } | ||||
|  | ||||
// next advances the iterator to the next field, skipping unexported
// fields and popping out of embedded structs whose fields are exhausted.
func (i *fieldIter) next() {
	for {
		k := len(i.index) - 1
		i.index[k]++
		if i.index[k] < i.n[k] {
			if !skip(i.field()) {
				break
			}
		} else {
			if k == 0 {
				// Past the last top-level field: iteration is done.
				return
			}
			// Pop out of the current embedded struct.
			i.index = i.index[:k]
			i.n = i.n[:k]
		}
	}
	i.descent()
}

// value returns the value of the current field.
func (i *fieldIter) value() reflect.Value {
	return i.v.FieldByIndex(i.index)
}

// field returns the StructField describing the current field.
func (i *fieldIter) field() reflect.StructField {
	return i.v.Type().FieldByIndex(i.index)
}
|  | ||||
// visitor is a function applied to each node during a recursive walk of
// a value.
type visitor func(v reflect.Value) error

// stopDescent is returned by a visitor to prune the walk below the
// current node without aborting the whole traversal.
var stopDescent = fmt.Errorf("do not recurse")

// visit walks x recursively, calling f on each node.
func (f visitor) visit(x interface{}) error {
	return f.visitRec(reflect.ValueOf(x))
}

// visit recursively calls f on all nodes in v.
func (f visitor) visitRec(v reflect.Value) error {
	if v.Kind() == reflect.Ptr {
		if v.IsNil() {
			return nil
		}
		return f.visitRec(v.Elem())
	}
	if err := f(v); err != nil {
		// stopDescent skips this subtree; any other error aborts the walk.
		if err == stopDescent {
			return nil
		}
		return err
	}
	switch v.Kind() {
	case reflect.Struct:
		for i := iter(v); !i.done(); i.next() {
			if err := f.visitRec(i.value()); err != nil {
				return err
			}
		}
	case reflect.Slice:
		for i := 0; i < v.Len(); i++ {
			if err := f.visitRec(v.Index(i)); err != nil {
				return err
			}
		}
	}
	return nil
}
|  | ||||
// getPath is used for error reporting purposes only.
// It renders the chain of enclosing elements of e as a dotted path,
// annotating elements that carry a type attribute.
func getPath(e Elem) string {
	if e == nil {
		return "<nil>"
	}
	if e.enclosing() == nil {
		// Root element: just its name.
		return e.GetCommon().name
	}
	if e.GetCommon().Type == "" {
		return fmt.Sprintf("%s.%s", getPath(e.enclosing()), e.GetCommon().name)
	}
	return fmt.Sprintf("%s.%s[type=%s]", getPath(e.enclosing()), e.GetCommon().name, e.GetCommon().Type)
}
|  | ||||
| // xmlName returns the xml name of the element or attribute | ||||
| func xmlName(f reflect.StructField) (name string, attr bool) { | ||||
| 	tags := strings.Split(f.Tag.Get("xml"), ",") | ||||
| 	for _, s := range tags { | ||||
| 		attr = attr || s == "attr" | ||||
| 	} | ||||
| 	return tags[0], attr | ||||
| } | ||||
|  | ||||
// findField returns the field of v (or of the struct v points to) whose
// xml tag name equals key, or an error if no such field exists.
func findField(v reflect.Value, key string) (reflect.Value, error) {
	v = reflect.Indirect(v)
	for i := iter(v); !i.done(); i.next() {
		if n, _ := xmlName(i.field()); n == key {
			return i.value(), nil
		}
	}
	return reflect.Value{}, fmt.Errorf("cldr: no field %q in element %#v", key, v.Interface())
}

// xpathPart matches one XPath component: an element name with an
// optional [@attr='value'] selector.
var xpathPart = regexp.MustCompile(`(\pL+)(?:\[@(\pL+)='([\w-]+)'\])?`)
|  | ||||
| func walkXPath(e Elem, path string) (res Elem, err error) { | ||||
| 	for _, c := range strings.Split(path, "/") { | ||||
| 		if c == ".." { | ||||
| 			if e = e.enclosing(); e == nil { | ||||
| 				panic("path ..") | ||||
| 				return nil, fmt.Errorf(`cldr: ".." moves past root in path %q`, path) | ||||
| 			} | ||||
| 			continue | ||||
| 		} else if c == "" { | ||||
| 			continue | ||||
| 		} | ||||
| 		m := xpathPart.FindStringSubmatch(c) | ||||
| 		if len(m) == 0 || len(m[0]) != len(c) { | ||||
| 			return nil, fmt.Errorf("cldr: syntax error in path component %q", c) | ||||
| 		} | ||||
| 		v, err := findField(reflect.ValueOf(e), m[1]) | ||||
| 		if err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
| 		switch v.Kind() { | ||||
| 		case reflect.Slice: | ||||
| 			i := 0 | ||||
| 			if m[2] != "" || v.Len() > 1 { | ||||
| 				if m[2] == "" { | ||||
| 					m[2] = "type" | ||||
| 					if m[3] = e.GetCommon().Default(); m[3] == "" { | ||||
| 						return nil, fmt.Errorf("cldr: type selector or default value needed for element %s", m[1]) | ||||
| 					} | ||||
| 				} | ||||
| 				for ; i < v.Len(); i++ { | ||||
| 					vi := v.Index(i) | ||||
| 					key, err := findField(vi.Elem(), m[2]) | ||||
| 					if err != nil { | ||||
| 						return nil, err | ||||
| 					} | ||||
| 					key = reflect.Indirect(key) | ||||
| 					if key.Kind() == reflect.String && key.String() == m[3] { | ||||
| 						break | ||||
| 					} | ||||
| 				} | ||||
| 			} | ||||
| 			if i == v.Len() || v.Index(i).IsNil() { | ||||
| 				return nil, fmt.Errorf("no %s found with %s==%s", m[1], m[2], m[3]) | ||||
| 			} | ||||
| 			e = v.Index(i).Interface().(Elem) | ||||
| 		case reflect.Ptr: | ||||
| 			if v.IsNil() { | ||||
| 				return nil, fmt.Errorf("cldr: element %q not found within element %q", m[1], e.GetCommon().name) | ||||
| 			} | ||||
| 			var ok bool | ||||
| 			if e, ok = v.Interface().(Elem); !ok { | ||||
| 				return nil, fmt.Errorf("cldr: %q is not an XML element", m[1]) | ||||
| 			} else if m[2] != "" || m[3] != "" { | ||||
| 				return nil, fmt.Errorf("cldr: no type selector allowed for element %s", m[1]) | ||||
| 			} | ||||
| 		default: | ||||
| 			return nil, fmt.Errorf("cldr: %q is not an XML element", m[1]) | ||||
| 		} | ||||
| 	} | ||||
| 	return e, nil | ||||
| } | ||||
|  | ||||
// absPrefix is the required prefix of absolute alias paths.
const absPrefix = "//ldml/"

// resolveAlias evaluates an alias path relative to e. If src is not
// "locale", the path must be absolute and is evaluated against the
// resolved LDML of locale src instead of e.
func (cldr *CLDR) resolveAlias(e Elem, src, path string) (res Elem, err error) {
	if src != "locale" {
		if !strings.HasPrefix(path, absPrefix) {
			return nil, fmt.Errorf("cldr: expected absolute path, found %q", path)
		}
		path = path[len(absPrefix):]
		if e, err = cldr.resolve(src); err != nil {
			return nil, err
		}
	}
	return walkXPath(e, path)
}
|  | ||||
// resolveAndMergeAlias resolves the alias of e, if any, and copies the
// aliased element's set, non-attribute fields into e.
func (cldr *CLDR) resolveAndMergeAlias(e Elem) error {
	alias := e.GetCommon().Alias
	if alias == nil {
		return nil
	}
	a, err := cldr.resolveAlias(e, alias.Source, alias.Path)
	if err != nil {
		return fmt.Errorf("%v: error evaluating path %q: %v", getPath(e), alias.Path, err)
	}
	// Ensure alias node was already evaluated. TODO: avoid double evaluation.
	err = cldr.resolveAndMergeAlias(a)
	v := reflect.ValueOf(e).Elem()
	for i := iter(reflect.ValueOf(a).Elem()); !i.done(); i.next() {
		// Copy fields that are set in the alias target, skipping attributes.
		if vv := i.value(); vv.Kind() != reflect.Ptr || !vv.IsNil() {
			if _, attr := xmlName(i.field()); !attr {
				v.FieldByIndex(i.index).Set(vv)
			}
		}
	}
	return err
}

// aliasResolver returns a visitor that merges aliases into each visited
// element and prunes the walk below blocking elements.
func (cldr *CLDR) aliasResolver() visitor {
	return func(v reflect.Value) (err error) {
		if e, ok := v.Addr().Interface().(Elem); ok {
			err = cldr.resolveAndMergeAlias(e)
			if err == nil && blocking[e.GetCommon().name] {
				return stopDescent
			}
		}
		return err
	}
}

// elements within blocking elements do not inherit.
// Taken from CLDR's supplementalMetaData.xml.
var blocking = map[string]bool{
	"identity":         true,
	"supplementalData": true,
	"cldrTest":         true,
	"collation":        true,
	"transform":        true,
}
|  | ||||
// Distinguishing attributes affect inheritance; two elements with different
// distinguishing attributes are treated as different for purposes of inheritance,
// except when such attributes occur in the indicated elements.
// A nil value means the attribute is distinguishing in every element.
// Taken from CLDR's supplementalMetaData.xml.
var distinguishing = map[string][]string{
	"key":        nil,
	"request_id": nil,
	"id":         nil,
	"registry":   nil,
	"alt":        nil,
	"iso4217":    nil,
	"iso3166":    nil,
	"mzone":      nil,
	"from":       nil,
	"to":         nil,
	// "type" is distinguishing everywhere except within these elements.
	// (gofmt -s: the redundant []string in the literal was removed.)
	"type": {
		"abbreviationFallback",
		"default",
		"mapping",
		"measurementSystem",
		"preferenceOrdering",
	},
	"numberSystem": nil,
}
|  | ||||
// in reports whether s occurs in set.
func in(set []string, s string) bool {
	for _, member := range set {
		if member == s {
			return true
		}
	}
	return false
}
|  | ||||
// attrKey computes a key based on the distinguishable attributes of
// an element and its values. v must hold a pointer to an element struct.
func attrKey(v reflect.Value, exclude ...string) string {
	parts := []string{}
	ename := v.Interface().(Elem).GetCommon().name
	v = v.Elem()
	for i := iter(v); !i.done(); i.next() {
		if name, attr := xmlName(i.field()); attr {
			// Include the attribute only if it is distinguishing for this
			// element and not explicitly excluded.
			if except, ok := distinguishing[name]; ok && !in(exclude, name) && !in(except, ename) {
				v := i.value()
				if v.Kind() == reflect.Ptr {
					v = v.Elem()
				}
				if v.IsValid() {
					parts = append(parts, fmt.Sprintf("%s=%s", name, v.String()))
				}
			}
		}
	}
	// Sort so the key is canonical regardless of field order.
	sort.Strings(parts)
	return strings.Join(parts, ";")
}

// Key returns a key for e derived from all distinguishing attributes
// except those specified by exclude.
func Key(e Elem, exclude ...string) string {
	return attrKey(reflect.ValueOf(e), exclude...)
}
|  | ||||
// linkEnclosing sets the enclosing element as well as the name
// for all sub-elements of child, recursively.
func linkEnclosing(parent, child Elem) {
	child.setEnclosing(parent)
	v := reflect.ValueOf(child).Elem()
	for i := iter(v); !i.done(); i.next() {
		vf := i.value()
		if vf.Kind() == reflect.Slice {
			// Repeated sub-element: link every entry.
			for j := 0; j < vf.Len(); j++ {
				linkEnclosing(child, vf.Index(j).Interface().(Elem))
			}
		} else if vf.Kind() == reflect.Ptr && !vf.IsNil() && vf.Elem().Kind() == reflect.Struct {
			// Single sub-element.
			linkEnclosing(child, vf.Interface().(Elem))
		}
	}
}

// setNames sets the name of e to name and recursively names all of its
// sub-elements after their xml struct-tag names.
func setNames(e Elem, name string) {
	e.setName(name)
	v := reflect.ValueOf(e).Elem()
	for i := iter(v); !i.done(); i.next() {
		vf := i.value()
		name, _ = xmlName(i.field())
		if vf.Kind() == reflect.Slice {
			for j := 0; j < vf.Len(); j++ {
				setNames(vf.Index(j).Interface().(Elem), name)
			}
		} else if vf.Kind() == reflect.Ptr && !vf.IsNil() && vf.Elem().Kind() == reflect.Struct {
			setNames(vf.Interface().(Elem), name)
		}
	}
}
|  | ||||
// deepCopy copies elements of v recursively.  All elements of v that may
// be modified by inheritance are explicitly copied.
// v must be a pointer or a slice; other kinds panic.
func deepCopy(v reflect.Value) reflect.Value {
	switch v.Kind() {
	case reflect.Ptr:
		// Only struct pointers are copied; nil and non-struct pointers
		// are shared as is.
		if v.IsNil() || v.Elem().Kind() != reflect.Struct {
			return v
		}
		nv := reflect.New(v.Elem().Type())
		nv.Elem().Set(v.Elem())
		deepCopyRec(nv.Elem(), v.Elem())
		return nv
	case reflect.Slice:
		nv := reflect.MakeSlice(v.Type(), v.Len(), v.Len())
		for i := 0; i < v.Len(); i++ {
			deepCopyRec(nv.Index(i), v.Index(i))
		}
		return nv
	}
	panic("deepCopy: must be called with pointer or slice")
}

// deepCopyRec is only called by deepCopy.
// It copies v into nv, recursing into named, non-attribute struct fields.
func deepCopyRec(nv, v reflect.Value) {
	if v.Kind() == reflect.Struct {
		t := v.Type()
		for i := 0; i < v.NumField(); i++ {
			if name, attr := xmlName(t.Field(i)); name != "" && !attr {
				deepCopyRec(nv.Field(i), v.Field(i))
			}
		}
	} else {
		nv.Set(deepCopy(v))
	}
}
|  | ||||
// newNode is used to insert a missing node during inheritance.
// It creates a fresh value of v's type, copying only v's attribute
// fields and fields without an XML name, and links the new node to the
// enclosing element enc.
func (cldr *CLDR) newNode(v, enc reflect.Value) reflect.Value {
	n := reflect.New(v.Type())
	for i := iter(v); !i.done(); i.next() {
		if name, attr := xmlName(i.field()); name == "" || attr {
			n.Elem().FieldByIndex(i.index).Set(i.value())
		}
	}
	n.Interface().(Elem).GetCommon().setEnclosing(enc.Addr().Interface().(Elem))
	return n
}
|  | ||||
// inheritFields merges parent into v field by field and returns the
// merged copy. v and parent must be pointers to structs of the same type.
func (cldr *CLDR) inheritFields(v, parent reflect.Value) (res reflect.Value, err error) {
	t := v.Type()
	nv := reflect.New(t)
	nv.Elem().Set(v)
	for i := iter(v); !i.done(); i.next() {
		vf := i.value()
		f := i.field()
		name, attr := xmlName(f)
		if name == "" || attr {
			// Attributes and unnamed fields are not inherited.
			continue
		}
		pf := parent.FieldByIndex(i.index)
		if blocking[name] {
			// Blocking elements do not inherit; deep-copy whichever of
			// the two is present, preferring the child.
			if vf.IsNil() {
				vf = pf
			}
			nv.Elem().FieldByIndex(i.index).Set(deepCopy(vf))
			continue
		}
		switch f.Type.Kind() {
		case reflect.Ptr:
			if f.Type.Elem().Kind() == reflect.Struct {
				if !vf.IsNil() {
					// Child has the element: merge the parent into it.
					if vf, err = cldr.inheritStructPtr(vf, pf); err != nil {
						return reflect.Value{}, err
					}
					vf.Interface().(Elem).setEnclosing(nv.Interface().(Elem))
					nv.Elem().FieldByIndex(i.index).Set(vf)
				} else if !pf.IsNil() {
					// Only the parent has the element: create a stub node
					// and inherit everything from the parent.
					n := cldr.newNode(pf.Elem(), v)
					if vf, err = cldr.inheritStructPtr(n, pf); err != nil {
						return reflect.Value{}, err
					}
					vf.Interface().(Elem).setEnclosing(nv.Interface().(Elem))
					nv.Elem().FieldByIndex(i.index).Set(vf)
				}
			}
		case reflect.Slice:
			// Repeated elements are merged element-wise by key.
			vf, err := cldr.inheritSlice(nv.Elem(), vf, pf)
			if err != nil {
				return reflect.Zero(t), err
			}
			nv.Elem().FieldByIndex(i.index).Set(vf)
		}
	}
	return nv, nil
}
|  | ||||
| func root(e Elem) *LDML { | ||||
| 	for ; e.enclosing() != nil; e = e.enclosing() { | ||||
| 	} | ||||
| 	return e.(*LDML) | ||||
| } | ||||
|  | ||||
// inheritStructPtr first merges possible aliases in with v and then inherits
// any underspecified elements from parent.
// v and parent are pointers to element structs; at least one of them must
// be non-nil.
func (cldr *CLDR) inheritStructPtr(v, parent reflect.Value) (r reflect.Value, err error) {
	if !v.IsNil() {
		e := v.Interface().(Elem).GetCommon()
		alias := e.Alias
		if alias == nil && !parent.IsNil() {
			// Child has no alias of its own: inherit the parent's.
			alias = parent.Interface().(Elem).GetCommon().Alias
		}
		if alias != nil {
			a, err := cldr.resolveAlias(v.Interface().(Elem), alias.Source, alias.Path)
			// NOTE(review): the error from resolveAlias is silently dropped
			// when a is nil; presumably alias resolution is best-effort
			// here — confirm before changing.
			if a != nil {
				if v, err = cldr.inheritFields(v.Elem(), reflect.ValueOf(a).Elem()); err != nil {
					return reflect.Value{}, err
				}
			}
		}
		if !parent.IsNil() {
			return cldr.inheritFields(v.Elem(), parent.Elem())
		}
	} else if parent.IsNil() {
		panic("should not reach here")
	}
	return v, nil
}
|  | ||||
| // Must be slice of struct pointers. | ||||
| func (cldr *CLDR) inheritSlice(enc, v, parent reflect.Value) (res reflect.Value, err error) { | ||||
| 	t := v.Type() | ||||
| 	index := make(map[string]reflect.Value) | ||||
| 	if !v.IsNil() { | ||||
| 		for i := 0; i < v.Len(); i++ { | ||||
| 			vi := v.Index(i) | ||||
| 			key := attrKey(vi) | ||||
| 			index[key] = vi | ||||
| 		} | ||||
| 	} | ||||
| 	if !parent.IsNil() { | ||||
| 		for i := 0; i < parent.Len(); i++ { | ||||
| 			vi := parent.Index(i) | ||||
| 			key := attrKey(vi) | ||||
| 			if w, ok := index[key]; ok { | ||||
| 				index[key], err = cldr.inheritStructPtr(w, vi) | ||||
| 			} else { | ||||
| 				n := cldr.newNode(vi.Elem(), enc) | ||||
| 				index[key], err = cldr.inheritStructPtr(n, vi) | ||||
| 			} | ||||
| 			index[key].Interface().(Elem).setEnclosing(enc.Addr().Interface().(Elem)) | ||||
| 			if err != nil { | ||||
| 				return v, err | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 	keys := make([]string, 0, len(index)) | ||||
| 	for k, _ := range index { | ||||
| 		keys = append(keys, k) | ||||
| 	} | ||||
| 	sort.Strings(keys) | ||||
| 	sl := reflect.MakeSlice(t, len(index), len(index)) | ||||
| 	for i, k := range keys { | ||||
| 		sl.Index(i).Set(index[k]) | ||||
| 	} | ||||
| 	return sl, nil | ||||
| } | ||||
|  | ||||
// parentLocale returns the parent of loc in the locale hierarchy: the
// locale with the last '_'-separated segment stripped, or "root" if loc
// has no separator.
func parentLocale(loc string) string {
	i := strings.LastIndex(loc, "_")
	if i < 0 {
		return "root"
	}
	return loc[:i]
}
|  | ||||
// resolve returns the fully inherited LDML for locale loc, computing and
// caching it (and its ancestor chain) on first use.
func (cldr *CLDR) resolve(loc string) (res *LDML, err error) {
	if r := cldr.resolved[loc]; r != nil {
		return r, nil
	}
	x := cldr.RawLDML(loc)
	if x == nil {
		return nil, fmt.Errorf("cldr: unknown locale %q", loc)
	}
	var v reflect.Value
	if loc == "root" {
		// The root locale has nothing to inherit from: copy it and
		// resolve its aliases directly.
		x = deepCopy(reflect.ValueOf(x)).Interface().(*LDML)
		linkEnclosing(nil, x)
		err = cldr.aliasResolver().visit(x)
	} else {
		// Find the nearest ancestor locale that is actually present,
		// resolve it, and inherit from it.
		key := parentLocale(loc)
		var parent *LDML
		for ; cldr.locale[key] == nil; key = parentLocale(key) {
		}
		if parent, err = cldr.resolve(key); err != nil {
			return nil, err
		}
		v, err = cldr.inheritFields(reflect.ValueOf(x).Elem(), reflect.ValueOf(parent).Elem())
		x = v.Interface().(*LDML)
		linkEnclosing(nil, x)
	}
	if err != nil {
		return nil, err
	}
	cldr.resolved[loc] = x
	return x, err
}
|  | ||||
// finalize finalizes the initialization of the raw LDML structs.  It also
// removes unwanted fields, as specified by filter, so that they will not
// be unnecessarily evaluated.
func (cldr *CLDR) finalize(filter []string) {
	for _, x := range cldr.locale {
		if filter != nil {
			v := reflect.ValueOf(x).Elem()
			t := v.Type()
			for i := 0; i < v.NumField(); i++ {
				f := t.Field(i)
				name, _ := xmlName(f)
				// Zero every named field that is not in the filter;
				// "identity" is always kept.
				if name != "" && name != "identity" && !in(filter, name) {
					v.Field(i).Set(reflect.Zero(f.Type))
				}
			}
		}
		linkEnclosing(nil, x) // for resolving aliases and paths
		setNames(x, "ldml")
	}
}
							
								
								
									
										144
									
								
								vendor/golang.org/x/text/unicode/cldr/slice.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										144
									
								
								vendor/golang.org/x/text/unicode/cldr/slice.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,144 @@ | ||||
| // Copyright 2013 The Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| package cldr | ||||
|  | ||||
| import ( | ||||
| 	"fmt" | ||||
| 	"reflect" | ||||
| 	"sort" | ||||
| ) | ||||
|  | ||||
// Slice provides utilities for modifying slices of elements.
// It can be wrapped around any slice of which the element type implements
// interface Elem.
type Slice struct {
	ptr reflect.Value // pointer to the wrapped slice
	typ reflect.Type  // struct type pointed to by the slice's element type
}

// Value returns the reflect.Value of the underlying slice.
func (s *Slice) Value() reflect.Value {
	return s.ptr.Elem()
}
|  | ||||
// MakeSlice wraps a pointer to a slice of Elems.
// It replaces the array pointed to by the slice so that subsequent modifications
// do not alter the data in a CLDR type.
// It panics if an incorrect type is passed.
func MakeSlice(slicePtr interface{}) Slice {
	ptr := reflect.ValueOf(slicePtr)
	if ptr.Kind() != reflect.Ptr {
		panic(fmt.Sprintf("MakeSlice: argument must be pointer to slice, found %v", ptr.Type()))
	}
	sl := ptr.Elem()
	if sl.Kind() != reflect.Slice {
		panic(fmt.Sprintf("MakeSlice: argument must point to a slice, found %v", sl.Type()))
	}
	// The slice's element type must implement Elem.
	intf := reflect.TypeOf((*Elem)(nil)).Elem()
	if !sl.Type().Elem().Implements(intf) {
		panic(fmt.Sprintf("MakeSlice: element type of slice (%v) does not implement Elem", sl.Type().Elem()))
	}
	// Replace the backing array with a copy so later modifications do not
	// affect the original CLDR data.
	nsl := reflect.MakeSlice(sl.Type(), sl.Len(), sl.Len())
	reflect.Copy(nsl, sl)
	sl.Set(nsl)
	return Slice{
		ptr: ptr,
		typ: sl.Type().Elem().Elem(),
	}
}
|  | ||||
// indexForAttr returns the field index path of the field whose xml name
// is a in the element struct type, for use with FieldByIndex. It panics
// if the type has no such field.
func (s Slice) indexForAttr(a string) []int {
	for i := iter(reflect.Zero(s.typ)); !i.done(); i.next() {
		if n, _ := xmlName(i.field()); n == a {
			return i.index
		}
	}
	// NOTE(review): the panic message says "MakeSlice" although this is
	// indexForAttr; the prefix looks copy-pasted.
	panic(fmt.Sprintf("MakeSlice: no attribute %q for type %v", a, s.typ))
}
|  | ||||
| // Filter filters s to only include elements for which fn returns true. | ||||
| func (s Slice) Filter(fn func(e Elem) bool) { | ||||
| 	k := 0 | ||||
| 	sl := s.Value() | ||||
| 	for i := 0; i < sl.Len(); i++ { | ||||
| 		vi := sl.Index(i) | ||||
| 		if fn(vi.Interface().(Elem)) { | ||||
| 			sl.Index(k).Set(vi) | ||||
| 			k++ | ||||
| 		} | ||||
| 	} | ||||
| 	sl.Set(sl.Slice(0, k)) | ||||
| } | ||||
|  | ||||
| // Group finds elements in s for which fn returns the same value and groups | ||||
| // them in a new Slice. | ||||
| func (s Slice) Group(fn func(e Elem) string) []Slice { | ||||
| 	m := make(map[string][]reflect.Value) | ||||
| 	sl := s.Value() | ||||
| 	for i := 0; i < sl.Len(); i++ { | ||||
| 		vi := sl.Index(i) | ||||
| 		key := fn(vi.Interface().(Elem)) | ||||
| 		m[key] = append(m[key], vi) | ||||
| 	} | ||||
| 	keys := []string{} | ||||
| 	for k, _ := range m { | ||||
| 		keys = append(keys, k) | ||||
| 	} | ||||
| 	sort.Strings(keys) | ||||
| 	res := []Slice{} | ||||
| 	for _, k := range keys { | ||||
| 		nsl := reflect.New(sl.Type()) | ||||
| 		nsl.Elem().Set(reflect.Append(nsl.Elem(), m[k]...)) | ||||
| 		res = append(res, MakeSlice(nsl.Interface())) | ||||
| 	} | ||||
| 	return res | ||||
| } | ||||
|  | ||||
| // SelectAnyOf filters s to contain only elements for which attr matches | ||||
| // any of the values. | ||||
| func (s Slice) SelectAnyOf(attr string, values ...string) { | ||||
| 	index := s.indexForAttr(attr) | ||||
| 	s.Filter(func(e Elem) bool { | ||||
| 		vf := reflect.ValueOf(e).Elem().FieldByIndex(index) | ||||
| 		return in(values, vf.String()) | ||||
| 	}) | ||||
| } | ||||
|  | ||||
// SelectOnePerGroup filters s to include at most one element e per group of
// elements matching Key(attr), where e has an attribute a that matches any
// of the values in v.
// If more than one element in a group matches a value in v preference
// is given to the element that matches the first value in v.
func (s Slice) SelectOnePerGroup(a string, v []string) {
	index := s.indexForAttr(a)
	grouped := s.Group(func(e Elem) string { return Key(e, a) })
	sl := s.Value()
	// Truncate the slice; the winning element of each group is re-appended below.
	sl.Set(sl.Slice(0, 0))
	for _, g := range grouped {
		e := reflect.Value{}
		// found is the index into v of the best match so far; len(v) means "none".
		found := len(v)
		gsl := g.Value()
		for i := 0; i < gsl.Len(); i++ {
			vi := gsl.Index(i).Elem().FieldByIndex(index)
			// Linear scan for the position of this element's value in v.
			j := 0
			for ; j < len(v) && v[j] != vi.String(); j++ {
			}
			if j < found {
				found = j
				e = gsl.Index(i)
			}
		}
		if found < len(v) {
			sl.Set(reflect.Append(sl, e))
		}
	}
}
|  | ||||
// SelectDraft drops all elements from the list with a draft level smaller than d
// and selects the highest draft level of the remaining.
// This method assumes that the input CLDR is canonicalized.
func (s Slice) SelectDraft(d Draft) {
	// NOTE(review): the slice expression selects the tail of the drafts table
	// starting at level d; the -2 offset presumably accounts for the table's
	// ordering/sentinel — confirm against the drafts variable's definition.
	s.SelectOnePerGroup("draft", drafts[len(drafts)-2-int(d):])
}
							
								
								
									
										1456
									
								
								vendor/golang.org/x/text/unicode/cldr/xml.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										1456
									
								
								vendor/golang.org/x/text/unicode/cldr/xml.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
										
											
												File diff suppressed because it is too large
												Load Diff
											
										
									
								
							
							
								
								
									
										508
									
								
								vendor/golang.org/x/text/unicode/norm/composition.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										508
									
								
								vendor/golang.org/x/text/unicode/norm/composition.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,508 @@ | ||||
| // Copyright 2011 The Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| package norm | ||||
|  | ||||
| import "unicode/utf8" | ||||
|  | ||||
const (
	maxNonStarters = 30
	// The maximum number of characters needed for a buffer is
	// maxNonStarters + 1 for the starter + 1 for the CGJ
	maxBufferSize    = maxNonStarters + 2
	maxNFCExpansion  = 3  // NFC(0x1D160)
	maxNFKCExpansion = 18 // NFKC(0xFDFA)

	maxByteBufferSize = utf8.UTFMax * maxBufferSize // 128
)

// ssState is used for reporting the segment state after inserting a rune.
// It is returned by streamSafe.next.
type ssState int

const (
	// Indicates a rune was successfully added to the segment.
	ssSuccess ssState = iota
	// Indicates a rune starts a new segment and should not be added.
	ssStarter
	// Indicates a rune caused a segment overflow and a CGJ should be inserted.
	ssOverflow
)

// streamSafe implements the policy of when a CGJ should be inserted.
// Its value is the running count of non-starters in the current segment.
type streamSafe uint8
|  | ||||
// first inserts the first rune of a segment. It is a faster version of next if
// it is known p represents the first rune in a segment.
func (ss *streamSafe) first(p Properties) {
	// A segment-initial rune resets the count to its own trailing non-starters.
	*ss = streamSafe(p.nTrailingNonStarters())
}
|  | ||||
// next returns a ssState value to indicate whether a rune represented by p
// can be inserted.
func (ss *streamSafe) next(p Properties) ssState {
	if *ss > maxNonStarters {
		panic("streamSafe was not reset")
	}
	n := p.nLeadingNonStarters()
	if *ss += streamSafe(n); *ss > maxNonStarters {
		*ss = 0
		return ssOverflow
	}
	// The Stream-Safe Text Processing prescribes that the counting can stop
	// as soon as a starter is encountered. However, there are some starters,
	// like Jamo V and T, that can combine with other runes, leaving their
	// successive non-starters appended to the previous, possibly causing an
	// overflow. We will therefore consider any rune with a non-zero nLead to
	// be a non-starter. Note that it always hold that if nLead > 0 then
	// nLead == nTrail.
	if n == 0 {
		*ss = streamSafe(p.nTrailingNonStarters())
		return ssStarter
	}
	return ssSuccess
}
|  | ||||
// backwards is used for checking for overflow and segment starts
// when traversing a string backwards. Users do not need to call first
// for the first rune. The state of the streamSafe retains the count of
// the non-starters loaded.
func (ss *streamSafe) backwards(p Properties) ssState {
	if *ss > maxNonStarters {
		panic("streamSafe was not reset")
	}
	// Tentatively add p's trailing non-starters; only commit on success.
	c := *ss + streamSafe(p.nTrailingNonStarters())
	if c > maxNonStarters {
		return ssOverflow
	}
	*ss = c
	if p.nLeadingNonStarters() == 0 {
		return ssStarter
	}
	return ssSuccess
}

// isMax reports whether the non-starter count has reached its limit.
func (ss streamSafe) isMax() bool {
	return ss == maxNonStarters
}
|  | ||||
// GraphemeJoiner is inserted after maxNonStarters non-starter runes.
const GraphemeJoiner = "\u034F"

// reorderBuffer is used to normalize a single segment.  Characters inserted with
// insert are decomposed and reordered based on CCC. The compose method can
// be used to recombine characters.  Note that the byte buffer does not hold
// the UTF-8 characters in order.  Only the rune array is maintained in sorted
// order. flush writes the resulting segment to a byte array.
type reorderBuffer struct {
	rune  [maxBufferSize]Properties // Per character info.
	byte  [maxByteBufferSize]byte   // UTF-8 buffer. Referenced by runeInfo.pos.
	nbyte uint8                     // Number or bytes.
	ss    streamSafe                // For limiting length of non-starter sequence.
	nrune int                       // Number of runeInfos.
	f     formInfo

	src      input // input being normalized (set by init/initString)
	nsrc     int   // length of src in bytes
	tmpBytes input // scratch input wrapper, reused by insertDecomposed

	out    []byte                    // output buffer, appended to by the flush function
	flushF func(*reorderBuffer) bool // flush callback; reports whether flushing succeeded
}
|  | ||||
// init prepares rb to normalize the byte slice src using form f.
func (rb *reorderBuffer) init(f Form, src []byte) {
	rb.f = *formTable[f]
	rb.src.setBytes(src)
	rb.nsrc = len(src)
	rb.ss = 0 // reset the stream-safe non-starter counter
}

// initString prepares rb to normalize the string src using form f.
func (rb *reorderBuffer) initString(f Form, src string) {
	rb.f = *formTable[f]
	rb.src.setString(src)
	rb.nsrc = len(src)
	rb.ss = 0 // reset the stream-safe non-starter counter
}

// setFlusher directs flushed segments to out via f.
func (rb *reorderBuffer) setFlusher(out []byte, f func(*reorderBuffer) bool) {
	rb.out = out
	rb.flushF = f
}

// reset discards all characters from the buffer.
func (rb *reorderBuffer) reset() {
	rb.nrune = 0
	rb.nbyte = 0
}
|  | ||||
// doFlush composes the buffer if the form requires it, flushes it via the
// configured flush function, and resets the buffer. It returns the value
// reported by the flush function.
func (rb *reorderBuffer) doFlush() bool {
	if rb.f.composing {
		rb.compose()
	}
	res := rb.flushF(rb)
	rb.reset()
	return res
}
|  | ||||
| // appendFlush appends the normalized segment to rb.out. | ||||
| func appendFlush(rb *reorderBuffer) bool { | ||||
| 	for i := 0; i < rb.nrune; i++ { | ||||
| 		start := rb.rune[i].pos | ||||
| 		end := start + rb.rune[i].size | ||||
| 		rb.out = append(rb.out, rb.byte[start:end]...) | ||||
| 	} | ||||
| 	return true | ||||
| } | ||||
|  | ||||
| // flush appends the normalized segment to out and resets rb. | ||||
| func (rb *reorderBuffer) flush(out []byte) []byte { | ||||
| 	for i := 0; i < rb.nrune; i++ { | ||||
| 		start := rb.rune[i].pos | ||||
| 		end := start + rb.rune[i].size | ||||
| 		out = append(out, rb.byte[start:end]...) | ||||
| 	} | ||||
| 	rb.reset() | ||||
| 	return out | ||||
| } | ||||
|  | ||||
| // flushCopy copies the normalized segment to buf and resets rb. | ||||
| // It returns the number of bytes written to buf. | ||||
| func (rb *reorderBuffer) flushCopy(buf []byte) int { | ||||
| 	p := 0 | ||||
| 	for i := 0; i < rb.nrune; i++ { | ||||
| 		runep := rb.rune[i] | ||||
| 		p += copy(buf[p:], rb.byte[runep.pos:runep.pos+runep.size]) | ||||
| 	} | ||||
| 	rb.reset() | ||||
| 	return p | ||||
| } | ||||
|  | ||||
| // insertOrdered inserts a rune in the buffer, ordered by Canonical Combining Class. | ||||
| // It returns false if the buffer is not large enough to hold the rune. | ||||
| // It is used internally by insert and insertString only. | ||||
| func (rb *reorderBuffer) insertOrdered(info Properties) { | ||||
| 	n := rb.nrune | ||||
| 	b := rb.rune[:] | ||||
| 	cc := info.ccc | ||||
| 	if cc > 0 { | ||||
| 		// Find insertion position + move elements to make room. | ||||
| 		for ; n > 0; n-- { | ||||
| 			if b[n-1].ccc <= cc { | ||||
| 				break | ||||
| 			} | ||||
| 			b[n] = b[n-1] | ||||
| 		} | ||||
| 	} | ||||
| 	rb.nrune += 1 | ||||
| 	pos := uint8(rb.nbyte) | ||||
| 	rb.nbyte += utf8.UTFMax | ||||
| 	info.pos = pos | ||||
| 	b[n] = info | ||||
| } | ||||
|  | ||||
// insertErr is an error code returned by insert. Using this type instead
// of error improves performance up to 20% for many of the benchmarks.
type insertErr int

const (
	// Values are zero or negative: iSuccess == 0, iShortDst == -1, iShortSrc == -2.
	iSuccess insertErr = -iota
	iShortDst
	iShortSrc
)
|  | ||||
// insertFlush inserts the given rune in the buffer ordered by CCC.
// If a decomposition with multiple segments is encountered, the leading
// ones are flushed.
// It returns a non-zero error code if the rune was not inserted.
func (rb *reorderBuffer) insertFlush(src input, i int, info Properties) insertErr {
	// Hangul syllables are decomposed algorithmically instead of via tables.
	if rune := src.hangul(i); rune != 0 {
		rb.decomposeHangul(rune)
		return iSuccess
	}
	if info.hasDecomposition() {
		return rb.insertDecomposed(info.Decomposition())
	}
	rb.insertSingle(src, i, info)
	return iSuccess
}
|  | ||||
// insertUnsafe inserts the given rune in the buffer ordered by CCC.
// It is assumed there is sufficient space to hold the runes. It is the
// responsibility of the caller to ensure this. This can be done by checking
// the state returned by the streamSafe type.
func (rb *reorderBuffer) insertUnsafe(src input, i int, info Properties) {
	if rune := src.hangul(i); rune != 0 {
		rb.decomposeHangul(rune)
	}
	if info.hasDecomposition() {
		// TODO: inline.
		rb.insertDecomposed(info.Decomposition())
	} else {
		rb.insertSingle(src, i, info)
	}
}
|  | ||||
// insertDecomposed inserts an entry in to the reorderBuffer for each rune
// in dcomp. dcomp must be a sequence of decomposed UTF-8-encoded runes.
// It flushes the buffer on each new segment start.
func (rb *reorderBuffer) insertDecomposed(dcomp []byte) insertErr {
	rb.tmpBytes.setBytes(dcomp)
	// As the streamSafe accounting already handles the counting for modifiers,
	// we don't have to call next. However, we do need to keep the accounting
	// intact when flushing the buffer.
	for i := 0; i < len(dcomp); {
		info := rb.f.info(rb.tmpBytes, i)
		// A new segment within the decomposition forces out what we have so far.
		if info.BoundaryBefore() && rb.nrune > 0 && !rb.doFlush() {
			return iShortDst
		}
		i += copy(rb.byte[rb.nbyte:], dcomp[i:i+int(info.size)])
		rb.insertOrdered(info)
	}
	return iSuccess
}
|  | ||||
// insertSingle inserts an entry in the reorderBuffer for the rune at
// position i. info is the runeInfo for the rune at position i.
func (rb *reorderBuffer) insertSingle(src input, i int, info Properties) {
	// Copy the rune's UTF-8 bytes into the scratch buffer, then record it
	// in CCC order.
	src.copySlice(rb.byte[rb.nbyte:], i, i+int(info.size))
	rb.insertOrdered(info)
}

// insertCGJ inserts a Combining Grapheme Joiner (0x034f) into rb.
func (rb *reorderBuffer) insertCGJ() {
	rb.insertSingle(input{str: GraphemeJoiner}, 0, Properties{size: uint8(len(GraphemeJoiner))})
}
|  | ||||
| // appendRune inserts a rune at the end of the buffer. It is used for Hangul. | ||||
| func (rb *reorderBuffer) appendRune(r rune) { | ||||
| 	bn := rb.nbyte | ||||
| 	sz := utf8.EncodeRune(rb.byte[bn:], rune(r)) | ||||
| 	rb.nbyte += utf8.UTFMax | ||||
| 	rb.rune[rb.nrune] = Properties{pos: bn, size: uint8(sz)} | ||||
| 	rb.nrune++ | ||||
| } | ||||
|  | ||||
| // assignRune sets a rune at position pos. It is used for Hangul and recomposition. | ||||
| func (rb *reorderBuffer) assignRune(pos int, r rune) { | ||||
| 	bn := rb.rune[pos].pos | ||||
| 	sz := utf8.EncodeRune(rb.byte[bn:], rune(r)) | ||||
| 	rb.rune[pos] = Properties{pos: bn, size: uint8(sz)} | ||||
| } | ||||
|  | ||||
// runeAt returns the rune at position n. It is used for Hangul and recomposition.
func (rb *reorderBuffer) runeAt(n int) rune {
	inf := rb.rune[n]
	r, _ := utf8.DecodeRune(rb.byte[inf.pos : inf.pos+inf.size])
	return r
}

// bytesAt returns the UTF-8 encoding of the rune at position n.
// It is used for Hangul and recomposition.
func (rb *reorderBuffer) bytesAt(n int) []byte {
	inf := rb.rune[n]
	// Widen to int before adding to avoid uint8 overflow in the slice bound.
	return rb.byte[inf.pos : int(inf.pos)+int(inf.size)]
}
|  | ||||
// For Hangul we combine algorithmically, instead of using tables.
const (
	hangulBase  = 0xAC00 // UTF-8(hangulBase) -> EA B0 80
	hangulBase0 = 0xEA
	hangulBase1 = 0xB0
	hangulBase2 = 0x80

	hangulEnd  = hangulBase + jamoLVTCount // UTF-8(0xD7A4) -> ED 9E A4
	hangulEnd0 = 0xED
	hangulEnd1 = 0x9E
	hangulEnd2 = 0xA4

	jamoLBase  = 0x1100 // UTF-8(jamoLBase) -> E1 84 00
	jamoLBase0 = 0xE1
	jamoLBase1 = 0x84
	jamoLEnd   = 0x1113
	jamoVBase  = 0x1161
	jamoVEnd   = 0x1176
	jamoTBase  = 0x11A7
	jamoTEnd   = 0x11C3

	// Counts of the algorithmic Hangul decomposition space: 19 L x 21 V x 28 T.
	jamoTCount   = 28
	jamoVCount   = 21
	jamoVTCount  = 21 * 28
	jamoLVTCount = 19 * 21 * 28
)

// Every Hangul syllable encodes to exactly three UTF-8 bytes.
const hangulUTF8Size = 3
|  | ||||
// isHangul reports whether b starts with the UTF-8 encoding of a Hangul
// syllable in [hangulBase, hangulEnd). The comparison is done directly on
// the UTF-8 bytes, avoiding a full decode.
func isHangul(b []byte) bool {
	if len(b) < hangulUTF8Size {
		return false
	}
	b0 := b[0]
	if b0 < hangulBase0 {
		return false
	}
	b1 := b[1]
	switch {
	case b0 == hangulBase0:
		// On the first lead byte, only continuation bytes >= hangulBase1 qualify.
		return b1 >= hangulBase1
	case b0 < hangulEnd0:
		return true
	case b0 > hangulEnd0:
		return false
	case b1 < hangulEnd1:
		return true
	}
	// b0 == hangulEnd0, b1 == hangulEnd1: decide on the final byte.
	return b1 == hangulEnd1 && b[2] < hangulEnd2
}
|  | ||||
// isHangulString is the string variant of isHangul; the byte-level logic is
// identical.
func isHangulString(b string) bool {
	if len(b) < hangulUTF8Size {
		return false
	}
	b0 := b[0]
	if b0 < hangulBase0 {
		return false
	}
	b1 := b[1]
	switch {
	case b0 == hangulBase0:
		// On the first lead byte, only continuation bytes >= hangulBase1 qualify.
		return b1 >= hangulBase1
	case b0 < hangulEnd0:
		return true
	case b0 > hangulEnd0:
		return false
	case b1 < hangulEnd1:
		return true
	}
	// b0 == hangulEnd0, b1 == hangulEnd1: decide on the final byte.
	return b1 == hangulEnd1 && b[2] < hangulEnd2
}
|  | ||||
// isJamoVT reports whether b starts with a Jamo V or T rune.
// Caller must ensure len(b) >= 2.
func isJamoVT(b []byte) bool {
	// True if (rune & 0xff00) == jamoLBase
	return b[0] == jamoLBase0 && (b[1]&0xFC) == jamoLBase1
}

// isHangulWithoutJamoT reports whether b starts with a Hangul LV syllable,
// i.e. one whose algorithmic decomposition has no trailing Jamo T.
func isHangulWithoutJamoT(b []byte) bool {
	c, _ := utf8.DecodeRune(b)
	c -= hangulBase
	return c < jamoLVTCount && c%jamoTCount == 0
}
|  | ||||
// decomposeHangul writes the decomposed Hangul to buf and returns the number
// of bytes written.  len(buf) should be at least 9.
func decomposeHangul(buf []byte, r rune) int {
	const JamoUTF8Len = 3
	r -= hangulBase
	x := r % jamoTCount // trailing consonant index; 0 means no Jamo T
	r /= jamoTCount
	utf8.EncodeRune(buf, jamoLBase+r/jamoVCount)
	utf8.EncodeRune(buf[JamoUTF8Len:], jamoVBase+r%jamoVCount)
	if x != 0 {
		utf8.EncodeRune(buf[2*JamoUTF8Len:], jamoTBase+x)
		return 3 * JamoUTF8Len
	}
	return 2 * JamoUTF8Len
}
|  | ||||
// decomposeHangul algorithmically decomposes a Hangul rune into
// its Jamo components, appending them to the buffer.
// See http://unicode.org/reports/tr15/#Hangul for details on decomposing Hangul.
func (rb *reorderBuffer) decomposeHangul(r rune) {
	r -= hangulBase
	x := r % jamoTCount // trailing consonant index; 0 means no Jamo T
	r /= jamoTCount
	rb.appendRune(jamoLBase + r/jamoVCount)
	rb.appendRune(jamoVBase + r%jamoVCount)
	if x != 0 {
		rb.appendRune(jamoTBase + x)
	}
}
|  | ||||
// combineHangul algorithmically combines Jamo character components into Hangul.
// See http://unicode.org/reports/tr15/#Hangul for details on combining Hangul.
// s is the index of the current starter, i the first unprocessed rune, and k
// the write position (k <= i); runes that do not combine are compacted toward
// the front of the buffer, mirroring the general loop in compose.
func (rb *reorderBuffer) combineHangul(s, i, k int) {
	b := rb.rune[:]
	bn := rb.nrune
	for ; i < bn; i++ {
		cccB := b[k-1].ccc
		cccC := b[i].ccc
		if cccB == 0 {
			s = k - 1
		}
		if s != k-1 && cccB >= cccC {
			// b[i] is blocked by greater-equal cccX below it
			b[k] = b[i]
			k++
		} else {
			l := rb.runeAt(s) // also used to compare to hangulBase
			v := rb.runeAt(i) // also used to compare to jamoT
			switch {
			case jamoLBase <= l && l < jamoLEnd &&
				jamoVBase <= v && v < jamoVEnd:
				// 11xx plus 116x to LV
				rb.assignRune(s, hangulBase+
					(l-jamoLBase)*jamoVTCount+(v-jamoVBase)*jamoTCount)
			case hangulBase <= l && l < hangulEnd &&
				jamoTBase < v && v < jamoTEnd &&
				((l-hangulBase)%jamoTCount) == 0:
				// ACxx plus 11Ax to LVT
				rb.assignRune(s, l+v-jamoTBase)
			default:
				// No combination possible; keep the rune.
				b[k] = b[i]
				k++
			}
		}
	}
	rb.nrune = k
}
|  | ||||
// compose recombines the runes in the buffer.
// It should only be used to recompose a single segment, as it will not
// handle alternations between Hangul and non-Hangul characters correctly.
func (rb *reorderBuffer) compose() {
	// UAX #15, section X5 , including Corrigendum #5
	// "In any character sequence beginning with starter S, a character C is
	//  blocked from S if and only if there is some character B between S
	//  and C, and either B is a starter or it has the same or higher
	//  combining class as C."
	bn := rb.nrune
	if bn == 0 {
		return
	}
	// s tracks the current starter, i the read position, k the write position.
	k := 1
	b := rb.rune[:]
	for s, i := 0, 1; i < bn; i++ {
		if isJamoVT(rb.bytesAt(i)) {
			// Redo from start in Hangul mode. Necessary to support
			// U+320E..U+321E in NFKC mode.
			rb.combineHangul(s, i, k)
			return
		}
		ii := b[i]
		// We can only use combineForward as a filter if we later
		// get the info for the combined character. This is more
		// expensive than using the filter. Using combinesBackward()
		// is safe.
		if ii.combinesBackward() {
			cccB := b[k-1].ccc
			cccC := ii.ccc
			blocked := false // b[i] blocked by starter or greater or equal CCC?
			if cccB == 0 {
				s = k - 1
			} else {
				blocked = s != k-1 && cccB >= cccC
			}
			if !blocked {
				combined := combine(rb.runeAt(s), rb.runeAt(i))
				if combined != 0 {
					// Replace the starter in place; b[i] is consumed.
					rb.assignRune(s, combined)
					continue
				}
			}
		}
		// b[i] did not combine; compact it to the write position.
		b[k] = b[i]
		k++
	}
	rb.nrune = k
}
							
								
								
									
										259
									
								
								vendor/golang.org/x/text/unicode/norm/forminfo.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										259
									
								
								vendor/golang.org/x/text/unicode/norm/forminfo.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,259 @@ | ||||
| // Copyright 2011 The Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| package norm | ||||
|  | ||||
| // This file contains Form-specific logic and wrappers for data in tables.go. | ||||
|  | ||||
| // Rune info is stored in a separate trie per composing form. A composing form | ||||
| // and its corresponding decomposing form share the same trie.  Each trie maps | ||||
| // a rune to a uint16. The values take two forms.  For v >= 0x8000: | ||||
| //   bits | ||||
| //   15:    1 (inverse of NFD_QC bit of qcInfo) | ||||
| //   13..7: qcInfo (see below). isYesD is always true (no decompostion). | ||||
| //    6..0: ccc (compressed CCC value). | ||||
| // For v < 0x8000, the respective rune has a decomposition and v is an index | ||||
| // into a byte array of UTF-8 decomposition sequences and additional info and | ||||
| // has the form: | ||||
| //    <header> <decomp_byte>* [<tccc> [<lccc>]] | ||||
| // The header contains the number of bytes in the decomposition (excluding this | ||||
| // length byte). The two most significant bits of this length byte correspond | ||||
| // to bit 5 and 4 of qcInfo (see below).  The byte sequence itself starts at v+1. | ||||
| // The byte sequence is followed by a trailing and leading CCC if the values | ||||
| // for these are not zero.  The value of v determines which ccc are appended | ||||
| // to the sequences.  For v < firstCCC, there are none, for v >= firstCCC, | ||||
| // the sequence is followed by a trailing ccc, and for v >= firstLeadingCC | ||||
| // there is an additional leading ccc. The value of tccc itself is the | ||||
| // trailing CCC shifted left 2 bits. The two least-significant bits of tccc | ||||
| // are the number of trailing non-starters. | ||||
|  | ||||
const (
	qcInfoMask      = 0x3F // to clear all but the relevant bits in a qcInfo
	headerLenMask   = 0x3F // extract the length value from the header byte
	headerFlagsMask = 0xC0 // extract the qcInfo bits from the header byte
)

// Properties provides access to normalization properties of a rune.
type Properties struct {
	pos   uint8  // start position in reorderBuffer; used in composition.go
	size  uint8  // length of UTF-8 encoding of this rune
	ccc   uint8  // leading canonical combining class (ccc if not decomposition)
	tccc  uint8  // trailing canonical combining class (ccc if not decomposition)
	nLead uint8  // number of leading non-starters.
	flags qcInfo // quick check flags
	index uint16 // index into the decomposition table; 0 means no decomposition
}
|  | ||||
// functions dispatchable per form
type lookupFunc func(b input, i int) Properties

// formInfo holds Form-specific functions and tables.
type formInfo struct {
	form                     Form
	composing, compatibility bool // form type
	info                     lookupFunc
	nextMain                 iterFunc
}

// formTable is indexed by the Form constants (NFC, NFD, NFKC, NFKD).
// Composing and decomposing variants of a form share the same info lookup.
var formTable = []*formInfo{{
	form:          NFC,
	composing:     true,
	compatibility: false,
	info:          lookupInfoNFC,
	nextMain:      nextComposed,
}, {
	form:          NFD,
	composing:     false,
	compatibility: false,
	info:          lookupInfoNFC,
	nextMain:      nextDecomposed,
}, {
	form:          NFKC,
	composing:     true,
	compatibility: true,
	info:          lookupInfoNFKC,
	nextMain:      nextComposed,
}, {
	form:          NFKD,
	composing:     false,
	compatibility: true,
	info:          lookupInfoNFKC,
	nextMain:      nextDecomposed,
}}
|  | ||||
// We do not distinguish between boundaries for NFC, NFD, etc. to avoid
// unexpected behavior for the user.  For example, in NFD, there is a boundary
// after 'a'.  However, 'a' might combine with modifiers, so from the application's
// perspective it is not a good boundary. We will therefore always use the
// boundaries for the combining variants.

// BoundaryBefore returns true if this rune starts a new segment and
// cannot combine with any rune on the left.
func (p Properties) BoundaryBefore() bool {
	if p.ccc == 0 && !p.combinesBackward() {
		return true
	}
	// We assume that the CCC of the first character in a decomposition
	// is always non-zero if different from info.ccc and that we can return
	// false at this point. This is verified by maketables.
	return false
}

// BoundaryAfter returns true if runes cannot combine with or otherwise
// interact with this or previous runes.
func (p Properties) BoundaryAfter() bool {
	// TODO: loosen these conditions.
	return p.isInert()
}
|  | ||||
// We pack quick check data in 4 bits:
//   5:    Combines forward  (0 == false, 1 == true)
//   4..3: NFC_QC Yes(00), No (10), or Maybe (11)
//   2:    NFD_QC Yes (0) or No (1). No also means there is a decomposition.
//   1..0: Number of trailing non-starters.
//
// When all 4 bits are zero, the character is inert, meaning it is never
// influenced by normalization.
type qcInfo uint8

func (p Properties) isYesC() bool { return p.flags&0x10 == 0 }
func (p Properties) isYesD() bool { return p.flags&0x4 == 0 }

func (p Properties) combinesForward() bool  { return p.flags&0x20 != 0 }
func (p Properties) combinesBackward() bool { return p.flags&0x8 != 0 } // == isMaybe
func (p Properties) hasDecomposition() bool { return p.flags&0x4 != 0 } // == isNoD

// isInert reports whether the rune is never influenced by normalization.
func (p Properties) isInert() bool {
	return p.flags&qcInfoMask == 0 && p.ccc == 0
}

// multiSegment reports whether the rune's decomposition spans multiple
// segments, i.e. its index lies in the [firstMulti, endMulti) table range.
func (p Properties) multiSegment() bool {
	return p.index >= firstMulti && p.index < endMulti
}

func (p Properties) nLeadingNonStarters() uint8 {
	return p.nLead
}

func (p Properties) nTrailingNonStarters() uint8 {
	// Stored in the two low bits of the quick-check flags.
	return uint8(p.flags & 0x03)
}
|  | ||||
// Decomposition returns the decomposition for the underlying rune
// or nil if there is none.
func (p Properties) Decomposition() []byte {
	// TODO: create the decomposition for Hangul?
	if p.index == 0 {
		return nil
	}
	i := p.index
	// The header byte at decomps[i] holds the decomposition length in its
	// low bits; the bytes themselves start at i+1.
	n := decomps[i] & headerLenMask
	i++
	return decomps[i : i+uint16(n)]
}

// Size returns the length of UTF-8 encoding of the rune.
func (p Properties) Size() int {
	return int(p.size)
}

// CCC returns the canonical combining class of the underlying rune.
func (p Properties) CCC() uint8 {
	// Entries at or above firstCCCZeroExcept store a zero canonical class.
	if p.index >= firstCCCZeroExcept {
		return 0
	}
	return ccc[p.ccc]
}

// LeadCCC returns the CCC of the first rune in the decomposition.
// If there is no decomposition, LeadCCC equals CCC.
func (p Properties) LeadCCC() uint8 {
	return ccc[p.ccc]
}

// TrailCCC returns the CCC of the last rune in the decomposition.
// If there is no decomposition, TrailCCC equals CCC.
func (p Properties) TrailCCC() uint8 {
	return ccc[p.tccc]
}
|  | ||||
// Recomposition
// We use 32-bit keys instead of 64-bit for the two codepoint keys.
// This clips off the bits of three entries, but we know this will not
// result in a collision. In the unlikely event that changes to
// UnicodeData.txt introduce collisions, the compiler will catch it.
// Note that the recomposition map for NFC and NFKC are identical.

// combine returns the combined rune or 0 if it doesn't exist.
func combine(a, b rune) rune {
	// Pack the low 16 bits of each rune into a single 32-bit map key.
	key := uint32(uint16(a))<<16 + uint32(uint16(b))
	return recompMap[key]
}
|  | ||||
// lookupInfoNFC returns the properties of the rune at byte position i of
// b, using the canonical (NFC/NFD) data trie.
func lookupInfoNFC(b input, i int) Properties {
	v, sz := b.charinfoNFC(i)
	return compInfo(v, sz)
}

// lookupInfoNFKC returns the properties of the rune at byte position i
// of b, using the compatibility (NFKC/NFKD) data trie.
func lookupInfoNFKC(b input, i int) Properties {
	v, sz := b.charinfoNFKC(i)
	return compInfo(v, sz)
}

// Properties returns properties for the first rune in s.
func (f Form) Properties(s []byte) Properties {
	// NFC and NFD share the canonical table; NFKC and NFKD share the
	// compatibility table.
	if f == NFC || f == NFD {
		return compInfo(nfcData.lookup(s))
	}
	return compInfo(nfkcData.lookup(s))
}

// PropertiesString returns properties for the first rune in s.
func (f Form) PropertiesString(s string) Properties {
	if f == NFC || f == NFD {
		return compInfo(nfcData.lookupString(s))
	}
	return compInfo(nfkcData.lookupString(s))
}
|  | ||||
// compInfo converts the information contained in v and sz
// to a Properties.  See the comment at the top of the file
// for more information on the format.
func compInfo(v uint16, sz int) Properties {
	if v == 0 {
		// No trie entry: the rune is inert; only its UTF-8 size matters.
		return Properties{size: uint8(sz)}
	} else if v >= 0x8000 {
		// High bit set: properties are encoded inline in v itself
		// (no decomposition entry to consult).
		p := Properties{
			size:  uint8(sz),
			ccc:   uint8(v),
			tccc:  uint8(v),
			flags: qcInfo(v >> 8),
		}
		if p.ccc > 0 || p.combinesBackward() {
			// Low two flag bits hold the leading non-starter count.
			p.nLead = uint8(p.flags & 0x3)
		}
		return p
	}
	// has decomposition
	h := decomps[v]
	f := (qcInfo(h&headerFlagsMask) >> 2) | 0x4
	p := Properties{size: uint8(sz), flags: f, index: v}
	if v >= firstCCC {
		// CCC data is stored right after the decomposition bytes.
		v += uint16(h&headerLenMask) + 1
		c := decomps[v]
		p.tccc = c >> 2
		p.flags |= qcInfo(c & 0x3)
		if v >= firstLeadingCCC {
			p.nLead = c & 0x3
			if v >= firstStarterWithNLead {
				// We were tricked. Remove the decomposition.
				p.flags &= 0x03
				p.index = 0
				return p
			}
			p.ccc = decomps[v+1]
		}
	}
	return p
}
							
								
								
									
										109
									
								
								vendor/golang.org/x/text/unicode/norm/input.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										109
									
								
								vendor/golang.org/x/text/unicode/norm/input.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,109 @@ | ||||
| // Copyright 2011 The Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| package norm | ||||
|  | ||||
| import "unicode/utf8" | ||||
|  | ||||
// input abstracts over a string or []byte source so the normalization
// code can operate on either without conversion.  Exactly one of str and
// bytes is active at a time; bytes == nil selects the string source.
type input struct {
	str   string
	bytes []byte
}

// inputBytes wraps a []byte source.
func inputBytes(str []byte) input {
	return input{bytes: str}
}

// inputString wraps a string source.
func inputString(str string) input {
	return input{str: str}
}

// setBytes switches in to a []byte source, clearing the string source.
func (in *input) setBytes(str []byte) {
	in.str = ""
	in.bytes = str
}

// setString switches in to a string source, clearing the []byte source.
func (in *input) setString(str string) {
	in.str = str
	in.bytes = nil
}

// _byte returns the byte at position p of the active source.
func (in *input) _byte(p int) byte {
	if in.bytes == nil {
		return in.str[p]
	}
	return in.bytes[p]
}
|  | ||||
// skipASCII returns the position of the first non-ASCII byte at or after
// p, without scanning past max.
func (in *input) skipASCII(p, max int) int {
	if in.bytes == nil {
		for ; p < max && in.str[p] < utf8.RuneSelf; p++ {
		}
	} else {
		for ; p < max && in.bytes[p] < utf8.RuneSelf; p++ {
		}
	}
	return p
}

// skipContinuationBytes returns the position of the first byte at or
// after p that starts a UTF-8 encoded rune (i.e. is not a continuation
// byte).
func (in *input) skipContinuationBytes(p int) int {
	if in.bytes == nil {
		for ; p < len(in.str) && !utf8.RuneStart(in.str[p]); p++ {
		}
	} else {
		for ; p < len(in.bytes) && !utf8.RuneStart(in.bytes[p]); p++ {
		}
	}
	return p
}
|  | ||||
| func (in *input) appendSlice(buf []byte, b, e int) []byte { | ||||
| 	if in.bytes != nil { | ||||
| 		return append(buf, in.bytes[b:e]...) | ||||
| 	} | ||||
| 	for i := b; i < e; i++ { | ||||
| 		buf = append(buf, in.str[i]) | ||||
| 	} | ||||
| 	return buf | ||||
| } | ||||
|  | ||||
// copySlice copies bytes [b, e) of the active source into buf and
// returns the number of bytes copied.
func (in *input) copySlice(buf []byte, b, e int) int {
	if in.bytes == nil {
		return copy(buf, in.str[b:e])
	}
	return copy(buf, in.bytes[b:e])
}
|  | ||||
// charinfoNFC looks up the trie value and UTF-8 size of the rune starting
// at byte position p, using the canonical (NFC/NFD) data.
func (in *input) charinfoNFC(p int) (uint16, int) {
	if in.bytes == nil {
		return nfcData.lookupString(in.str[p:])
	}
	return nfcData.lookup(in.bytes[p:])
}

// charinfoNFKC looks up the trie value and UTF-8 size of the rune
// starting at byte position p, using the compatibility (NFKC/NFKD) data.
func (in *input) charinfoNFKC(p int) (uint16, int) {
	if in.bytes == nil {
		return nfkcData.lookupString(in.str[p:])
	}
	return nfkcData.lookup(in.bytes[p:])
}
|  | ||||
// hangul decodes and returns the Hangul syllable starting at byte
// position p, or 0 if the input at p is not a Hangul syllable.
func (in *input) hangul(p int) (r rune) {
	var size int
	if in.bytes == nil {
		if !isHangulString(in.str[p:]) {
			return 0
		}
		r, size = utf8.DecodeRuneInString(in.str[p:])
	} else {
		if !isHangul(in.bytes[p:]) {
			return 0
		}
		r, size = utf8.DecodeRune(in.bytes[p:])
	}
	if size != hangulUTF8Size {
		// Defensive: a well-formed Hangul syllable encodes to exactly
		// hangulUTF8Size bytes.
		return 0
	}
	return r
}
							
								
								
									
										457
									
								
								vendor/golang.org/x/text/unicode/norm/iter.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										457
									
								
								vendor/golang.org/x/text/unicode/norm/iter.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,457 @@ | ||||
| // Copyright 2011 The Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| package norm | ||||
|  | ||||
| import ( | ||||
| 	"fmt" | ||||
| 	"unicode/utf8" | ||||
| ) | ||||
|  | ||||
// MaxSegmentSize is the maximum size of a byte buffer needed to consider any
// sequence of starter and non-starter runes for the purpose of normalization.
const MaxSegmentSize = maxByteBufferSize

// An Iter iterates over a string or byte slice, while normalizing it
// to a given Form.
type Iter struct {
	rb     reorderBuffer
	buf    [maxByteBufferSize]byte // scratch buffer for returned segments
	info   Properties              // first character saved from previous iteration
	next   iterFunc                // implementation of next depends on form
	asciiF iterFunc                // fast-path state used while input is ASCII

	p        int    // current position in input source
	multiSeg []byte // remainder of multi-segment decomposition
}

// iterFunc is a state function: it produces the next segment and may
// replace i.next to switch the iterator to a different state.
type iterFunc func(*Iter) []byte
|  | ||||
// Init initializes i to iterate over src after normalizing it to Form f.
func (i *Iter) Init(f Form, src []byte) {
	i.p = 0
	if len(src) == 0 {
		i.setDone()
		i.rb.nsrc = 0
		return
	}
	i.multiSeg = nil
	i.rb.init(f, src)
	i.next = i.rb.f.nextMain
	i.asciiF = nextASCIIBytes
	// Prime the properties of the first rune and the segment state.
	i.info = i.rb.f.info(i.rb.src, i.p)
	i.rb.ss.first(i.info)
}

// InitString initializes i to iterate over src after normalizing it to Form f.
func (i *Iter) InitString(f Form, src string) {
	i.p = 0
	if len(src) == 0 {
		i.setDone()
		i.rb.nsrc = 0
		return
	}
	i.multiSeg = nil
	i.rb.initString(f, src)
	i.next = i.rb.f.nextMain
	i.asciiF = nextASCIIString
	i.info = i.rb.f.info(i.rb.src, i.p)
	i.rb.ss.first(i.info)
}
|  | ||||
// Seek sets the segment to be returned by the next call to Next to start
// at position p.  It is the responsibility of the caller to set p to the
// start of a segment.
func (i *Iter) Seek(offset int64, whence int) (int64, error) {
	var abs int64
	switch whence {
	case 0: // io.SeekStart semantics
		abs = offset
	case 1: // io.SeekCurrent semantics
		abs = int64(i.p) + offset
	case 2: // io.SeekEnd semantics
		abs = int64(i.rb.nsrc) + offset
	default:
		return 0, fmt.Errorf("norm: invalid whence")
	}
	if abs < 0 {
		return 0, fmt.Errorf("norm: negative position")
	}
	if int(abs) >= i.rb.nsrc {
		// Seeking at or past the end leaves the iterator done.
		i.setDone()
		return int64(i.p), nil
	}
	// Reset the state machine at the new position.
	i.p = int(abs)
	i.multiSeg = nil
	i.next = i.rb.f.nextMain
	i.info = i.rb.f.info(i.rb.src, i.p)
	i.rb.ss.first(i.info)
	return abs, nil
}
|  | ||||
// returnSlice returns a slice of the underlying input type as a byte slice.
// If the underlying is of type []byte, it will simply return a slice.
// If the underlying is of type string, it will copy the slice to the buffer
// and return that.
func (i *Iter) returnSlice(a, b int) []byte {
	if i.rb.src.bytes == nil {
		// String input: copy into the iterator's scratch buffer so a
		// []byte can be returned without allocating.
		return i.buf[:copy(i.buf[:], i.rb.src.str[a:b])]
	}
	return i.rb.src.bytes[a:b]
}
|  | ||||
// Pos returns the byte position at which the next call to Next will commence processing.
func (i *Iter) Pos() int {
	return i.p
}

// setDone marks the iterator exhausted; subsequent Next calls return nil
// via the nextDone state.
func (i *Iter) setDone() {
	i.next = nextDone
	i.p = i.rb.nsrc
}

// Done returns true if there is no more input to process.
func (i *Iter) Done() bool {
	return i.p >= i.rb.nsrc
}
|  | ||||
// Next returns f(i.input[i.Pos():n]), where n is a boundary of i.input.
// For any input a and b for which f(a) == f(b), subsequent calls
// to Next will return the same segments.
// Modifying runes are grouped together with the preceding starter, if such a starter exists.
// Although not guaranteed, n will typically be the smallest possible n.
func (i *Iter) Next() []byte {
	// Dispatch to the current state function (see iterFunc).
	return i.next(i)
}
|  | ||||
// nextASCIIBytes is the fast-path state for []byte input while the
// current byte is ASCII; it returns one byte per call.
func nextASCIIBytes(i *Iter) []byte {
	p := i.p + 1
	if p >= i.rb.nsrc {
		i.setDone()
		return i.rb.src.bytes[i.p:p]
	}
	if i.rb.src.bytes[p] < utf8.RuneSelf {
		p0 := i.p
		i.p = p
		return i.rb.src.bytes[p0:p]
	}
	// Next byte is non-ASCII: return to the general implementation.
	i.info = i.rb.f.info(i.rb.src, i.p)
	i.next = i.rb.f.nextMain
	return i.next(i)
}

// nextASCIIString is the fast-path state for string input; the single
// ASCII byte is copied into i.buf so a []byte can be returned.
func nextASCIIString(i *Iter) []byte {
	p := i.p + 1
	if p >= i.rb.nsrc {
		i.buf[0] = i.rb.src.str[i.p]
		i.setDone()
		return i.buf[:1]
	}
	if i.rb.src.str[p] < utf8.RuneSelf {
		i.buf[0] = i.rb.src.str[i.p]
		i.p = p
		return i.buf[:1]
	}
	// Next byte is non-ASCII: return to the general implementation.
	i.info = i.rb.f.info(i.rb.src, i.p)
	i.next = i.rb.f.nextMain
	return i.next(i)
}
|  | ||||
// nextHangul is the state used while the input consists of consecutive
// Hangul syllables; it decomposes one syllable per call.
func nextHangul(i *Iter) []byte {
	p := i.p
	next := p + hangulUTF8Size
	if next >= i.rb.nsrc {
		i.setDone()
	} else if i.rb.src.hangul(next) == 0 {
		// The following rune is not Hangul: fall back to the main state.
		i.rb.ss.next(i.info)
		i.info = i.rb.f.info(i.rb.src, i.p)
		i.next = i.rb.f.nextMain
		return i.next(i)
	}
	i.p = next
	return i.buf[:decomposeHangul(i.buf[:], i.rb.src.hangul(p))]
}

// nextDone is the terminal state: the input is exhausted.
func nextDone(i *Iter) []byte {
	return nil
}
|  | ||||
// nextMulti is used for iterating over multi-segment decompositions
// for decomposing normal forms.
func nextMulti(i *Iter) []byte {
	j := 0
	d := i.multiSeg
	// skip first rune
	for j = 1; j < len(d) && !utf8.RuneStart(d[j]); j++ {
	}
	for j < len(d) {
		info := i.rb.f.info(input{bytes: d}, j)
		if info.BoundaryBefore() {
			// A new segment starts here: emit what precedes it and keep
			// the remainder for the next call.
			i.multiSeg = d[j:]
			return d[:j]
		}
		j += int(info.size)
	}
	// treat last segment as normal decomposition
	i.next = i.rb.f.nextMain
	return i.next(i)
}

// nextMultiNorm is used for iterating over multi-segment decompositions
// for composing normal forms.
func nextMultiNorm(i *Iter) []byte {
	j := 0
	d := i.multiSeg
	for j < len(d) {
		info := i.rb.f.info(input{bytes: d}, j)
		if info.BoundaryBefore() {
			// Flush the segment gathered so far and start the next one
			// with the current rune.
			i.rb.compose()
			seg := i.buf[:i.rb.flushCopy(i.buf[:])]
			i.rb.insertUnsafe(input{bytes: d}, j, info)
			i.multiSeg = d[j+int(info.size):]
			return seg
		}
		i.rb.insertUnsafe(input{bytes: d}, j, info)
		j += int(info.size)
	}
	i.multiSeg = nil
	i.next = nextComposed
	return doNormComposed(i)
}
|  | ||||
// nextDecomposed is the implementation of Next for forms NFD and NFKD.
func nextDecomposed(i *Iter) (next []byte) {
	outp := 0
	inCopyStart, outCopyStart := i.p, 0
	for {
		if sz := int(i.info.size); sz <= 1 {
			i.rb.ss = 0
			p := i.p
			i.p++ // ASCII or illegal byte.  Either way, advance by 1.
			if i.p >= i.rb.nsrc {
				i.setDone()
				return i.returnSlice(p, i.p)
			} else if i.rb.src._byte(i.p) < utf8.RuneSelf {
				// Next byte is ASCII too: switch to the fast path.
				i.next = i.asciiF
				return i.returnSlice(p, i.p)
			}
			outp++
		} else if d := i.info.Decomposition(); d != nil {
			// Note: If leading CCC != 0, then len(d) == 2 and last is also non-zero.
			// Case 1: there is a leftover to copy.  In this case the decomposition
			// must begin with a modifier and should always be appended.
			// Case 2: no leftover. Simply return d if followed by a ccc == 0 value.
			p := outp + len(d)
			if outp > 0 {
				i.rb.src.copySlice(i.buf[outCopyStart:], inCopyStart, i.p)
				// TODO: this condition should not be possible, but we leave it
				// in for defensive purposes.
				if p > len(i.buf) {
					return i.buf[:outp]
				}
			} else if i.info.multiSegment() {
				// outp must be 0 as multi-segment decompositions always
				// start a new segment.
				if i.multiSeg == nil {
					i.multiSeg = d
					i.next = nextMulti
					return nextMulti(i)
				}
				// We are in the last segment.  Treat as normal decomposition.
				d = i.multiSeg
				i.multiSeg = nil
				p = len(d)
			}
			prevCC := i.info.tccc
			if i.p += sz; i.p >= i.rb.nsrc {
				i.setDone()
				i.info = Properties{} // Force BoundaryBefore to succeed.
			} else {
				i.info = i.rb.f.info(i.rb.src, i.p)
			}
			switch i.rb.ss.next(i.info) {
			case ssOverflow:
				i.next = nextCGJDecompose
				fallthrough
			case ssStarter:
				if outp > 0 {
					copy(i.buf[outp:], d)
					return i.buf[:p]
				}
				return d
			}
			copy(i.buf[outp:], d)
			outp = p
			inCopyStart, outCopyStart = i.p, outp
			if i.info.ccc < prevCC {
				goto doNorm
			}
			continue
		} else if r := i.rb.src.hangul(i.p); r != 0 {
			// Hangul syllables are decomposed algorithmically.
			outp = decomposeHangul(i.buf[:], r)
			i.p += hangulUTF8Size
			inCopyStart, outCopyStart = i.p, outp
			if i.p >= i.rb.nsrc {
				i.setDone()
				break
			} else if i.rb.src.hangul(i.p) != 0 {
				i.next = nextHangul
				return i.buf[:outp]
			}
		} else {
			// No decomposition: the rune will be copied verbatim.
			p := outp + sz
			if p > len(i.buf) {
				break
			}
			outp = p
			i.p += sz
		}
		if i.p >= i.rb.nsrc {
			i.setDone()
			break
		}
		prevCC := i.info.tccc
		i.info = i.rb.f.info(i.rb.src, i.p)
		if v := i.rb.ss.next(i.info); v == ssStarter {
			break
		} else if v == ssOverflow {
			i.next = nextCGJDecompose
			break
		}
		if i.info.ccc < prevCC {
			// Combining characters out of canonical order: reorder below.
			goto doNorm
		}
	}
	if outCopyStart == 0 {
		return i.returnSlice(inCopyStart, i.p)
	} else if inCopyStart < i.p {
		i.rb.src.copySlice(i.buf[outCopyStart:], inCopyStart, i.p)
	}
	return i.buf[:outp]
doNorm:
	// Insert what we have decomposed so far in the reorderBuffer.
	// As we will only reorder, there will always be enough room.
	i.rb.src.copySlice(i.buf[outCopyStart:], inCopyStart, i.p)
	i.rb.insertDecomposed(i.buf[0:outp])
	return doNormDecomposed(i)
}
|  | ||||
// doNormDecomposed inserts the pending combining runes into the reorder
// buffer and returns the flushed (reordered) segment.
func doNormDecomposed(i *Iter) []byte {
	for {
		i.rb.insertUnsafe(i.rb.src, i.p, i.info)
		if i.p += int(i.info.size); i.p >= i.rb.nsrc {
			i.setDone()
			break
		}
		i.info = i.rb.f.info(i.rb.src, i.p)
		if i.info.ccc == 0 {
			break
		}
		if s := i.rb.ss.next(i.info); s == ssOverflow {
			i.next = nextCGJDecompose
			break
		}
	}
	// new segment or too many combining characters: exit normalization
	return i.buf[:i.rb.flushCopy(i.buf[:])]
}

// nextCGJDecompose inserts a CGJ to break up an overlong run of
// combining characters, then resumes decomposing.
func nextCGJDecompose(i *Iter) []byte {
	i.rb.ss = 0
	i.rb.insertCGJ()
	i.next = nextDecomposed
	i.rb.ss.first(i.info)
	buf := doNormDecomposed(i)
	return buf
}
|  | ||||
// nextComposed is the implementation of Next for forms NFC and NFKC.
func nextComposed(i *Iter) []byte {
	outp, startp := 0, i.p
	var prevCC uint8
	for {
		if !i.info.isYesC() {
			// Rune is not guaranteed to be in composed form: normalize.
			goto doNorm
		}
		prevCC = i.info.tccc
		sz := int(i.info.size)
		if sz == 0 {
			sz = 1 // illegal rune: copy byte-by-byte
		}
		p := outp + sz
		if p > len(i.buf) {
			break
		}
		outp = p
		i.p += sz
		if i.p >= i.rb.nsrc {
			i.setDone()
			break
		} else if i.rb.src._byte(i.p) < utf8.RuneSelf {
			// ASCII follows: switch to the fast path.
			i.rb.ss = 0
			i.next = i.asciiF
			break
		}
		i.info = i.rb.f.info(i.rb.src, i.p)
		if v := i.rb.ss.next(i.info); v == ssStarter {
			break
		} else if v == ssOverflow {
			i.next = nextCGJCompose
			break
		}
		if i.info.ccc < prevCC {
			goto doNorm
		}
	}
	// The scanned range was already normalized: return it verbatim.
	return i.returnSlice(startp, i.p)
doNorm:
	// reset to start position
	i.p = startp
	i.info = i.rb.f.info(i.rb.src, i.p)
	i.rb.ss.first(i.info)
	if i.info.multiSegment() {
		d := i.info.Decomposition()
		info := i.rb.f.info(input{bytes: d}, 0)
		i.rb.insertUnsafe(input{bytes: d}, 0, info)
		i.multiSeg = d[int(info.size):]
		i.next = nextMultiNorm
		return nextMultiNorm(i)
	}
	i.rb.ss.first(i.info)
	i.rb.insertUnsafe(i.rb.src, i.p, i.info)
	return doNormComposed(i)
}
|  | ||||
// doNormComposed gathers runes up to the next starter into the reorder
// buffer, composes them, and returns the flushed segment.
func doNormComposed(i *Iter) []byte {
	// First rune should already be inserted.
	for {
		if i.p += int(i.info.size); i.p >= i.rb.nsrc {
			i.setDone()
			break
		}
		i.info = i.rb.f.info(i.rb.src, i.p)
		if s := i.rb.ss.next(i.info); s == ssStarter {
			break
		} else if s == ssOverflow {
			i.next = nextCGJCompose
			break
		}
		i.rb.insertUnsafe(i.rb.src, i.p, i.info)
	}
	i.rb.compose()
	seg := i.buf[:i.rb.flushCopy(i.buf[:])]
	return seg
}

// nextCGJCompose inserts a CGJ to break up an overlong run of combining
// characters, then resumes composing.
func nextCGJCompose(i *Iter) []byte {
	i.rb.ss = 0 // instead of first
	i.rb.insertCGJ()
	i.next = nextComposed
	// Note that we treat any rune with nLeadingNonStarters > 0 as a non-starter,
	// even if they are not. This is particularly dubious for U+FF9E and UFF9A.
	// If we ever change that, insert a check here.
	i.rb.ss.first(i.info)
	i.rb.insertUnsafe(i.rb.src, i.p, i.info)
	return doNormComposed(i)
}
							
								
								
									
										976
									
								
								vendor/golang.org/x/text/unicode/norm/maketables.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										976
									
								
								vendor/golang.org/x/text/unicode/norm/maketables.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,976 @@ | ||||
| // Copyright 2011 The Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| // +build ignore | ||||
|  | ||||
| // Normalization table generator. | ||||
| // Data read from the web. | ||||
| // See forminfo.go for a description of the trie values associated with each rune. | ||||
|  | ||||
| package main | ||||
|  | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"flag" | ||||
| 	"fmt" | ||||
| 	"io" | ||||
| 	"log" | ||||
| 	"sort" | ||||
| 	"strconv" | ||||
| 	"strings" | ||||
|  | ||||
| 	"golang.org/x/text/internal/gen" | ||||
| 	"golang.org/x/text/internal/triegen" | ||||
| 	"golang.org/x/text/internal/ucd" | ||||
| ) | ||||
|  | ||||
// main drives the table generation: load the UCD source files, derive
// per-rune normalization properties, optionally cross-check them, and
// emit the Go tables.
func main() {
	gen.Init()
	loadUnicodeData()
	compactCCC()
	loadCompositionExclusions()
	completeCharFields(FCanonical)
	completeCharFields(FCompatibility)
	computeNonStarterCounts()
	verifyComputed()
	printChars()
	testDerived()
	printTestdata()
	makeTables()
}
|  | ||||
// Command-line flags controlling which tables are generated and whether
// the results are cross-checked against DerivedNormalizationProps.
var (
	tablelist = flag.String("tables",
		"all",
		"comma-separated list of which tables to generate; "+
			"can be 'decomp', 'recomp', 'info' and 'all'")
	test = flag.Bool("test",
		false,
		"test existing tables against DerivedNormalizationProps and generate test data for regression testing")
	verbose = flag.Bool("verbose",
		false,
		"write data to stdout as it is parsed")
)
|  | ||||
const MaxChar = 0x10FFFF // anything above this shouldn't exist

// Quick Check properties of runes allow us to quickly
// determine whether a rune may occur in a normal form.
// For a given normal form, a rune may be guaranteed to occur
// verbatim (QC=Yes), may or may not combine with another
// rune (QC=Maybe), or may not occur (QC=No).
type QCResult int

const (
	QCUnknown QCResult = iota
	QCYes
	QCNo
	QCMaybe
)

// String returns the human-readable name of the quick-check result, or a
// sentinel string for unknown/out-of-range values.
func (r QCResult) String() string {
	if r >= QCYes && r <= QCMaybe {
		return [...]string{"Yes", "No", "Maybe"}[r-QCYes]
	}
	return "***UNKNOWN***"
}
|  | ||||
// Form-type selectors used to index Char.forms: canonical covers
// NFC/NFD, compatibility covers NFKC/NFKD.
const (
	FCanonical     = iota // NFC or NFD
	FCompatibility        // NFKC or NFKD
	FNumberOfFormTypes
)

// Mode selectors distinguishing the composed and decomposed variant of a
// form, used to index FormInfo.quickCheck and FormInfo.verified.
const (
	MComposed   = iota // NFC or NFKC
	MDecomposed        // NFD or NFKD
	MNumberOfModes
)
|  | ||||
// Char holds the per-code-point properties this generator cares about.
// This contains only the properties we're interested in.
type Char struct {
	name          string
	codePoint     rune  // if zero, this index is not a valid code point.
	ccc           uint8 // canonical combining class
	origCCC       uint8 // ccc value before compactCCC remaps it
	excludeInComp bool  // from CompositionExclusions.txt
	compatDecomp  bool  // it has a compatibility expansion

	nTrailingNonStarters uint8
	nLeadingNonStarters  uint8 // must be equal to trailing if non-zero

	forms [FNumberOfFormTypes]FormInfo // For FCanonical and FCompatibility

	state State
}

// chars holds one entry per Unicode code point, indexed by rune value.
var chars = make([]Char, MaxChar+1)

// cccMap maps the compacted CCC index back to the original CCC value
// (populated by compactCCC).
var cccMap = make(map[uint8]uint8)

// String renders the character's normalization-relevant properties for
// debugging/verbose output.
func (c Char) String() string {
	buf := new(bytes.Buffer)

	fmt.Fprintf(buf, "%U [%s]:\n", c.codePoint, c.name)
	fmt.Fprintf(buf, "  ccc: %v\n", c.ccc)
	fmt.Fprintf(buf, "  excludeInComp: %v\n", c.excludeInComp)
	fmt.Fprintf(buf, "  compatDecomp: %v\n", c.compatDecomp)
	fmt.Fprintf(buf, "  state: %v\n", c.state)
	fmt.Fprintf(buf, "  NFC:\n")
	fmt.Fprint(buf, c.forms[FCanonical])
	fmt.Fprintf(buf, "  NFKC:\n")
	fmt.Fprint(buf, c.forms[FCompatibility])

	return buf.String()
}
|  | ||||
// In UnicodeData.txt, some ranges are marked like this:
//	3400;<CJK Ideograph Extension A, First>;Lo;0;L;;;;;N;;;;;
//	4DB5;<CJK Ideograph Extension A, Last>;Lo;0;L;;;;;N;;;;;
// parseCharacter keeps a state variable indicating the weirdness.
type State int

const (
	SNormal State = iota // known to be zero for the type
	SFirst
	SLast
	SMissing
)

// lastChar is the last code point handled while parsing; presumably used
// for the First/Last range handling above — see parseCharacter (not in
// this file excerpt).
var lastChar = rune('\u0000')

// isValid reports whether c describes an assigned code point (non-zero
// and not marked missing).
func (c Char) isValid() bool {
	return c.codePoint != 0 && c.state != SMissing
}
|  | ||||
// FormInfo holds the derived properties of a character for one form type
// (canonical or compatibility).
type FormInfo struct {
	quickCheck [MNumberOfModes]QCResult // index: MComposed or MDecomposed
	verified   [MNumberOfModes]bool     // index: MComposed or MDecomposed

	combinesForward  bool // May combine with rune on the right
	combinesBackward bool // May combine with rune on the left
	isOneWay         bool // Never appears in result
	inDecomp         bool // Some decompositions result in this char.
	decomp           Decomposition
	expandedDecomp   Decomposition
}

// String renders the form information for debugging/verbose output.
func (f FormInfo) String() string {
	buf := bytes.NewBuffer(make([]byte, 0))

	fmt.Fprintf(buf, "    quickCheck[C]: %v\n", f.quickCheck[MComposed])
	fmt.Fprintf(buf, "    quickCheck[D]: %v\n", f.quickCheck[MDecomposed])
	fmt.Fprintf(buf, "    cmbForward: %v\n", f.combinesForward)
	fmt.Fprintf(buf, "    cmbBackward: %v\n", f.combinesBackward)
	fmt.Fprintf(buf, "    isOneWay: %v\n", f.isOneWay)
	fmt.Fprintf(buf, "    inDecomp: %v\n", f.inDecomp)
	fmt.Fprintf(buf, "    decomposition: %X\n", f.decomp)
	fmt.Fprintf(buf, "    expandedDecomp: %X\n", f.expandedDecomp)

	return buf.String()
}
|  | ||||
// Decomposition is a sequence of code points a character maps to.
type Decomposition []rune

// parseDecomposition parses a space-separated list of hexadecimal code
// points, as found in the decomposition field of UnicodeData.txt.  When
// skipfirst is true, the first token (e.g. a "<compat>" tag) is ignored.
func parseDecomposition(s string, skipfirst bool) (a []rune, err error) {
	tokens := strings.Split(s, " ")
	if skipfirst && len(tokens) > 0 {
		tokens = tokens[1:]
	}
	for _, tok := range tokens {
		cp, perr := strconv.ParseUint(tok, 16, 64)
		if perr != nil {
			return a, perr
		}
		a = append(a, rune(cp))
	}
	return a, nil
}
|  | ||||
// loadUnicodeData reads UnicodeData.txt and populates the global chars
// slice with names, combining classes, and decompositions.
func loadUnicodeData() {
	f := gen.OpenUCDFile("UnicodeData.txt")
	defer f.Close()
	p := ucd.New(f)
	for p.Next() {
		r := p.Rune(ucd.CodePoint)
		char := &chars[r]

		char.ccc = uint8(p.Uint(ucd.CanonicalCombiningClass))
		decmap := p.String(ucd.DecompMapping)

		// Try parsing as a canonical decomposition first; on failure the
		// field starts with a <tag>, marking a compatibility mapping, so
		// retry with the first token skipped.
		exp, err := parseDecomposition(decmap, false)
		isCompat := false
		if err != nil {
			if len(decmap) > 0 {
				exp, err = parseDecomposition(decmap, true)
				if err != nil {
					log.Fatalf(`%U: bad decomp |%v|: "%s"`, r, decmap, err)
				}
				isCompat = true
			}
		}

		char.name = p.String(ucd.Name)
		char.codePoint = r
		char.forms[FCompatibility].decomp = exp
		if !isCompat {
			char.forms[FCanonical].decomp = exp
		} else {
			char.compatDecomp = true
		}
		// NOTE(review): this assignment repeats the one above and looks
		// redundant; kept as-is — confirm against upstream before removing.
		if len(decmap) > 0 {
			char.forms[FCompatibility].decomp = exp
		}
	}
	if err := p.Err(); err != nil {
		log.Fatal(err)
	}
}
|  | ||||
| // compactCCC converts the sparse set of CCC values to a continguous one, | ||||
| // reducing the number of bits needed from 8 to 6. | ||||
| func compactCCC() { | ||||
| 	m := make(map[uint8]uint8) | ||||
| 	for i := range chars { | ||||
| 		c := &chars[i] | ||||
| 		m[c.ccc] = 0 | ||||
| 	} | ||||
| 	cccs := []int{} | ||||
| 	for v, _ := range m { | ||||
| 		cccs = append(cccs, int(v)) | ||||
| 	} | ||||
| 	sort.Ints(cccs) | ||||
| 	for i, c := range cccs { | ||||
| 		cccMap[uint8(i)] = uint8(c) | ||||
| 		m[uint8(c)] = uint8(i) | ||||
| 	} | ||||
| 	for i := range chars { | ||||
| 		c := &chars[i] | ||||
| 		c.origCCC = c.ccc | ||||
| 		c.ccc = m[c.ccc] | ||||
| 	} | ||||
| 	if len(m) >= 1<<6 { | ||||
| 		log.Fatalf("too many difference CCC values: %d >= 64", len(m)) | ||||
| 	} | ||||
| } | ||||
|  | ||||
// loadCompositionExclusions marks the runes listed in
// CompositionExclusions.txt, which has form:
// 0958    # ...
// See http://unicode.org/reports/tr44/ for full explanation
func loadCompositionExclusions() {
	f := gen.OpenUCDFile("CompositionExclusions.txt")
	defer f.Close()
	p := ucd.New(f)
	for p.Next() {
		c := &chars[p.Rune(0)]
		if c.excludeInComp {
			log.Fatalf("%U: Duplicate entry in exclusions.", c.codePoint)
		}
		c.excludeInComp = true
	}
	if e := p.Err(); e != nil {
		log.Fatal(e)
	}
}
|  | ||||
| // hasCompatDecomp returns true if any of the recursive | ||||
| // decompositions contains a compatibility expansion. | ||||
| // In this case, the character may not occur in NFK*. | ||||
| func hasCompatDecomp(r rune) bool { | ||||
| 	c := &chars[r] | ||||
| 	if c.compatDecomp { | ||||
| 		return true | ||||
| 	} | ||||
| 	for _, d := range c.forms[FCompatibility].decomp { | ||||
| 		if hasCompatDecomp(d) { | ||||
| 			return true | ||||
| 		} | ||||
| 	} | ||||
| 	return false | ||||
| } | ||||
|  | ||||
// Hangul related constants.
const (
	HangulBase = 0xAC00
	HangulEnd  = 0xD7A4 // hangulBase + Jamo combinations (19 * 21 * 28)

	// Code point ranges of the conjoining Jamo: leading consonants (L),
	// vowels (V), and trailing consonants (T).
	JamoLBase = 0x1100
	JamoLEnd  = 0x1113
	JamoVBase = 0x1161
	JamoVEnd  = 0x1176
	JamoTBase = 0x11A8
	JamoTEnd  = 0x11C3

	JamoLVTCount = 19 * 21 * 28 // total precomposed syllable combinations
	JamoTCount   = 28           // combinations per leading/vowel (LV) pair
)
|  | ||||
| func isHangul(r rune) bool { | ||||
| 	return HangulBase <= r && r < HangulEnd | ||||
| } | ||||
|  | ||||
| func isHangulWithoutJamoT(r rune) bool { | ||||
| 	if !isHangul(r) { | ||||
| 		return false | ||||
| 	} | ||||
| 	r -= HangulBase | ||||
| 	return r < JamoLVTCount && r%JamoTCount == 0 | ||||
| } | ||||
|  | ||||
// ccc returns the canonical combining class of r as currently stored in
// chars (compacted after compactCCC has run).
func ccc(r rune) uint8 {
	return chars[r].ccc
}
|  | ||||
| // Insert a rune in a buffer, ordered by Canonical Combining Class. | ||||
| func insertOrdered(b Decomposition, r rune) Decomposition { | ||||
| 	n := len(b) | ||||
| 	b = append(b, 0) | ||||
| 	cc := ccc(r) | ||||
| 	if cc > 0 { | ||||
| 		// Use bubble sort. | ||||
| 		for ; n > 0; n-- { | ||||
| 			if ccc(b[n-1]) <= cc { | ||||
| 				break | ||||
| 			} | ||||
| 			b[n] = b[n-1] | ||||
| 		} | ||||
| 	} | ||||
| 	b[n] = r | ||||
| 	return b | ||||
| } | ||||
|  | ||||
| // Recursively decompose. | ||||
| func decomposeRecursive(form int, r rune, d Decomposition) Decomposition { | ||||
| 	dcomp := chars[r].forms[form].decomp | ||||
| 	if len(dcomp) == 0 { | ||||
| 		return insertOrdered(d, r) | ||||
| 	} | ||||
| 	for _, c := range dcomp { | ||||
| 		d = decomposeRecursive(form, c, d) | ||||
| 	} | ||||
| 	return d | ||||
| } | ||||
|  | ||||
// completeCharFields derives, for the given form (FCanonical or
// FCompatibility), the per-rune fields that follow from the raw
// decomposition data: expanded decompositions, one-way compositions,
// forward/backward combining flags, and quick-check values.
func completeCharFields(form int) {
	// Phase 0: pre-expand decomposition.
	for i := range chars {
		f := &chars[i].forms[form]
		if len(f.decomp) == 0 {
			continue
		}
		exp := make(Decomposition, 0)
		for _, c := range f.decomp {
			exp = decomposeRecursive(form, c, exp)
		}
		f.expandedDecomp = exp
	}

	// Phase 1: composition exclusion, mark decomposition.
	for i := range chars {
		c := &chars[i]
		f := &c.forms[form]

		// Marks script-specific exclusions and version restricted.
		f.isOneWay = c.excludeInComp

		// Singletons
		f.isOneWay = f.isOneWay || len(f.decomp) == 1

		// Non-starter decompositions
		if len(f.decomp) > 1 {
			chk := c.ccc != 0 || chars[f.decomp[0]].ccc != 0
			f.isOneWay = f.isOneWay || chk
		}

		// Runes that decompose into more than two runes.
		f.isOneWay = f.isOneWay || len(f.decomp) > 2

		if form == FCompatibility {
			f.isOneWay = f.isOneWay || hasCompatDecomp(c.codePoint)
		}

		// Mark every rune that occurs in some decomposition.
		for _, r := range f.decomp {
			chars[r].forms[form].inDecomp = true
		}
	}

	// Phase 2: forward and backward combining.
	for i := range chars {
		c := &chars[i]
		f := &c.forms[form]

		// A two-rune, two-way decomposition means the pair recomposes:
		// the first rune combines forward, the second backward.
		if !f.isOneWay && len(f.decomp) == 2 {
			f0 := &chars[f.decomp[0]].forms[form]
			f1 := &chars[f.decomp[1]].forms[form]
			if !f0.isOneWay {
				f0.combinesForward = true
			}
			if !f1.isOneWay {
				f1.combinesBackward = true
			}
		}
		// An LV Hangul syllable can still combine with a trailing Jamo T.
		if isHangulWithoutJamoT(rune(i)) {
			f.combinesForward = true
		}
	}

	// Phase 3: quick check values.
	for i := range chars {
		c := &chars[i]
		f := &c.forms[form]

		// NF*D: No if the rune decomposes (Hangul decomposes algorithmically).
		switch {
		case len(f.decomp) > 0:
			f.quickCheck[MDecomposed] = QCNo
		case isHangul(rune(i)):
			f.quickCheck[MDecomposed] = QCNo
		default:
			f.quickCheck[MDecomposed] = QCYes
		}
		switch {
		case f.isOneWay:
			f.quickCheck[MComposed] = QCNo
		case (i & 0xffff00) == JamoLBase:
			// The mask selects the 0x1100-0x11FF block, which holds all
			// conjoining Jamo; classify by L/V/T sub-range.
			f.quickCheck[MComposed] = QCYes
			if JamoLBase <= i && i < JamoLEnd {
				f.combinesForward = true
			}
			if JamoVBase <= i && i < JamoVEnd {
				f.quickCheck[MComposed] = QCMaybe
				f.combinesBackward = true
				f.combinesForward = true
			}
			if JamoTBase <= i && i < JamoTEnd {
				f.quickCheck[MComposed] = QCMaybe
				f.combinesBackward = true
			}
		case !f.combinesBackward:
			f.quickCheck[MComposed] = QCYes
		default:
			f.quickCheck[MComposed] = QCMaybe
		}
	}
}
|  | ||||
| func computeNonStarterCounts() { | ||||
| 	// Phase 4: leading and trailing non-starter count | ||||
| 	for i := range chars { | ||||
| 		c := &chars[i] | ||||
|  | ||||
| 		runes := []rune{rune(i)} | ||||
| 		// We always use FCompatibility so that the CGJ insertion points do not | ||||
| 		// change for repeated normalizations with different forms. | ||||
| 		if exp := c.forms[FCompatibility].expandedDecomp; len(exp) > 0 { | ||||
| 			runes = exp | ||||
| 		} | ||||
| 		// We consider runes that combine backwards to be non-starters for the | ||||
| 		// purpose of Stream-Safe Text Processing. | ||||
| 		for _, r := range runes { | ||||
| 			if cr := &chars[r]; cr.ccc == 0 && !cr.forms[FCompatibility].combinesBackward { | ||||
| 				break | ||||
| 			} | ||||
| 			c.nLeadingNonStarters++ | ||||
| 		} | ||||
| 		for i := len(runes) - 1; i >= 0; i-- { | ||||
| 			if cr := &chars[runes[i]]; cr.ccc == 0 && !cr.forms[FCompatibility].combinesBackward { | ||||
| 				break | ||||
| 			} | ||||
| 			c.nTrailingNonStarters++ | ||||
| 		} | ||||
| 		if c.nTrailingNonStarters > 3 { | ||||
| 			log.Fatalf("%U: Decomposition with more than 3 (%d) trailing modifiers (%U)", i, c.nTrailingNonStarters, runes) | ||||
| 		} | ||||
|  | ||||
| 		if isHangul(rune(i)) { | ||||
| 			c.nTrailingNonStarters = 2 | ||||
| 			if isHangulWithoutJamoT(rune(i)) { | ||||
| 				c.nTrailingNonStarters = 1 | ||||
| 			} | ||||
| 		} | ||||
|  | ||||
| 		if l, t := c.nLeadingNonStarters, c.nTrailingNonStarters; l > 0 && l != t { | ||||
| 			log.Fatalf("%U: number of leading and trailing non-starters should be equal (%d vs %d)", i, l, t) | ||||
| 		} | ||||
| 		if t := c.nTrailingNonStarters; t > 3 { | ||||
| 			log.Fatalf("%U: number of trailing non-starters is %d > 3", t) | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
|  | ||||
// printBytes writes b to w as a Go [...]byte array literal named name,
// preceded by a comment stating its size. Bytes are emitted 8 per line,
// with an offset comment every 64 bytes.
func printBytes(w io.Writer, b []byte, name string) {
	fmt.Fprintf(w, "// %s: %d bytes\n", name, len(b))
	fmt.Fprintf(w, "var %s = [...]byte {", name)
	for i, c := range b {
		if i%64 == 0 {
			fmt.Fprintf(w, "\n// Bytes %x - %x\n", i, i+63)
		} else if i%8 == 0 {
			fmt.Fprintf(w, "\n")
		}
		fmt.Fprintf(w, "0x%.2X, ", c)
	}
	fmt.Fprint(w, "\n}\n\n")
}
|  | ||||
| // See forminfo.go for format. | ||||
| func makeEntry(f *FormInfo, c *Char) uint16 { | ||||
| 	e := uint16(0) | ||||
| 	if r := c.codePoint; HangulBase <= r && r < HangulEnd { | ||||
| 		e |= 0x40 | ||||
| 	} | ||||
| 	if f.combinesForward { | ||||
| 		e |= 0x20 | ||||
| 	} | ||||
| 	if f.quickCheck[MDecomposed] == QCNo { | ||||
| 		e |= 0x4 | ||||
| 	} | ||||
| 	switch f.quickCheck[MComposed] { | ||||
| 	case QCYes: | ||||
| 	case QCNo: | ||||
| 		e |= 0x10 | ||||
| 	case QCMaybe: | ||||
| 		e |= 0x18 | ||||
| 	default: | ||||
| 		log.Fatalf("Illegal quickcheck value %v.", f.quickCheck[MComposed]) | ||||
| 	} | ||||
| 	e |= uint16(c.nTrailingNonStarters) | ||||
| 	return e | ||||
| } | ||||
|  | ||||
// decompSet keeps track of unique decompositions, grouped by whether
// the decomposition is followed by a trailing and/or leading CCC.
// It is indexed by the category constants declared below.
type decompSet [7]map[string]bool
|  | ||||
// Decomposition categories. They index a decompSet and determine the order
// in which groups of decompositions are emitted into the decomps array.
const (
	normalDecomp = iota
	firstMulti
	firstCCC
	endMulti
	firstLeadingCCC
	firstCCCZeroExcept
	firstStarterWithNLead
	lastDecomp
)
|  | ||||
| var cname = []string{"firstMulti", "firstCCC", "endMulti", "firstLeadingCCC", "firstCCCZeroExcept", "firstStarterWithNLead", "lastDecomp"} | ||||
|  | ||||
| func makeDecompSet() decompSet { | ||||
| 	m := decompSet{} | ||||
| 	for i := range m { | ||||
| 		m[i] = make(map[string]bool) | ||||
| 	} | ||||
| 	return m | ||||
| } | ||||
| func (m *decompSet) insert(key int, s string) { | ||||
| 	m[key][s] = true | ||||
| } | ||||
|  | ||||
// printCharInfoTables writes the decomposition data and the nfc/nfkc
// lookup tries to w and returns the total number of table bytes emitted.
func printCharInfoTables(w io.Writer) int {
	// mkstr encodes the expanded decomposition of r (format documented in
	// forminfo.go): a header byte (length plus quick-check/combining bits),
	// the UTF-8 decomposition, and optional trailing/leading CCC bytes.
	// It returns the decompSet category and the encoded string.
	mkstr := func(r rune, f *FormInfo) (int, string) {
		d := f.expandedDecomp
		s := string([]rune(d))
		if max := 1 << 6; len(s) >= max {
			const msg = "%U: too many bytes in decomposition: %d >= %d"
			log.Fatalf(msg, r, len(s), max)
		}
		head := uint8(len(s))
		if f.quickCheck[MComposed] != QCYes {
			head |= 0x40
		}
		if f.combinesForward {
			head |= 0x80
		}
		s = string([]byte{head}) + s

		lccc := ccc(d[0])
		tccc := ccc(d[len(d)-1])
		cc := ccc(r)
		if cc != 0 && lccc == 0 && tccc == 0 {
			log.Fatalf("%U: trailing and leading ccc are 0 for non-zero ccc %d", r, cc)
		}
		if tccc < lccc && lccc != 0 {
			const msg = "%U: lccc (%d) must be <= tcc (%d)"
			log.Fatalf(msg, r, lccc, tccc)
		}
		index := normalDecomp
		nTrail := chars[r].nTrailingNonStarters
		nLead := chars[r].nLeadingNonStarters
		if tccc > 0 || lccc > 0 || nTrail > 0 {
			// Append a byte holding the trailing CCC in the upper 6 bits
			// and the trailing non-starter count in the lower 2 bits.
			tccc <<= 2
			tccc |= nTrail
			s += string([]byte{tccc})
			index = endMulti
			for _, r := range d[1:] {
				if ccc(r) == 0 {
					index = firstCCC
				}
			}
			if lccc > 0 || nLead > 0 {
				s += string([]byte{lccc})
				if index == firstCCC {
					log.Fatalf("%U: multi-segment decomposition not supported for decompositions with leading CCC != 0", r)
				}
				index = firstLeadingCCC
			}
			if cc != lccc {
				if cc != 0 {
					log.Fatalf("%U: for lccc != ccc, expected ccc to be 0; was %d", r, cc)
				}
				index = firstCCCZeroExcept
			}
		} else if len(d) > 1 {
			index = firstMulti
		}
		return index, s
	}

	decompSet := makeDecompSet()
	const nLeadStr = "\x00\x01" // 0-byte length and tccc with nTrail.
	decompSet.insert(firstStarterWithNLead, nLeadStr)

	// Store the uniqued decompositions in a byte buffer,
	// preceded by their byte length.
	for _, c := range chars {
		for _, f := range c.forms {
			if len(f.expandedDecomp) == 0 {
				continue
			}
			if f.combinesBackward {
				log.Fatalf("%U: combinesBackward and decompose", c.codePoint)
			}
			index, s := mkstr(c.codePoint, &f)
			decompSet.insert(index, s)
		}
	}

	decompositions := bytes.NewBuffer(make([]byte, 0, 10000))
	size := 0
	positionMap := make(map[string]uint16)
	decompositions.WriteString("\000")
	fmt.Fprintln(w, "const (")
	for i, m := range decompSet {
		// Emit each category's strings in sorted order for deterministic
		// output, recording every string's offset into the buffer.
		sa := []string{}
		for s := range m {
			sa = append(sa, s)
		}
		sort.Strings(sa)
		for _, s := range sa {
			p := decompositions.Len()
			decompositions.WriteString(s)
			positionMap[s] = uint16(p)
		}
		if cname[i] != "" {
			fmt.Fprintf(w, "%s = 0x%X\n", cname[i], decompositions.Len())
		}
	}
	fmt.Fprintln(w, "maxDecomp = 0x8000")
	fmt.Fprintln(w, ")")
	b := decompositions.Bytes()
	printBytes(w, b, "decomps")
	size += len(b)

	varnames := []string{"nfc", "nfkc"}
	for i := 0; i < FNumberOfFormTypes; i++ {
		trie := triegen.NewTrie(varnames[i])

		for r, c := range chars {
			f := c.forms[i]
			d := f.expandedDecomp
			if len(d) != 0 {
				_, key := mkstr(c.codePoint, &f)
				trie.Insert(rune(r), uint64(positionMap[key]))
				if c.ccc != ccc(d[0]) {
					// We assume the lead ccc of a decomposition !=0 in this case.
					if ccc(d[0]) == 0 {
						log.Fatalf("Expected leading CCC to be non-zero; ccc is %d", c.ccc)
					}
				}
			} else if c.nLeadingNonStarters > 0 && len(f.expandedDecomp) == 0 && c.ccc == 0 && !f.combinesBackward {
				// Handle cases where it can't be detected that the nLead should be equal
				// to nTrail.
				trie.Insert(c.codePoint, uint64(positionMap[nLeadStr]))
			} else if v := makeEntry(&f, &c)<<8 | uint16(c.ccc); v != 0 {
				// Entries with the high bit (maxDecomp) set encode the
				// value inline instead of an offset into decomps.
				trie.Insert(c.codePoint, uint64(0x8000|v))
			}
		}
		sz, err := trie.Gen(w, triegen.Compact(&normCompacter{name: varnames[i]}))
		if err != nil {
			log.Fatal(err)
		}
		size += sz
	}
	return size
}
|  | ||||
// contains reports whether s occurs in sa.
func contains(sa []string, s string) bool {
	for _, candidate := range sa {
		if candidate == s {
			return true
		}
	}
	return false
}
|  | ||||
| func makeTables() { | ||||
| 	w := &bytes.Buffer{} | ||||
|  | ||||
| 	size := 0 | ||||
| 	if *tablelist == "" { | ||||
| 		return | ||||
| 	} | ||||
| 	list := strings.Split(*tablelist, ",") | ||||
| 	if *tablelist == "all" { | ||||
| 		list = []string{"recomp", "info"} | ||||
| 	} | ||||
|  | ||||
| 	// Compute maximum decomposition size. | ||||
| 	max := 0 | ||||
| 	for _, c := range chars { | ||||
| 		if n := len(string(c.forms[FCompatibility].expandedDecomp)); n > max { | ||||
| 			max = n | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	fmt.Fprintln(w, "const (") | ||||
| 	fmt.Fprintln(w, "\t// Version is the Unicode edition from which the tables are derived.") | ||||
| 	fmt.Fprintf(w, "\tVersion = %q\n", gen.UnicodeVersion()) | ||||
| 	fmt.Fprintln(w) | ||||
| 	fmt.Fprintln(w, "\t// MaxTransformChunkSize indicates the maximum number of bytes that Transform") | ||||
| 	fmt.Fprintln(w, "\t// may need to write atomically for any Form. Making a destination buffer at") | ||||
| 	fmt.Fprintln(w, "\t// least this size ensures that Transform can always make progress and that") | ||||
| 	fmt.Fprintln(w, "\t// the user does not need to grow the buffer on an ErrShortDst.") | ||||
| 	fmt.Fprintf(w, "\tMaxTransformChunkSize = %d+maxNonStarters*4\n", len(string(0x034F))+max) | ||||
| 	fmt.Fprintln(w, ")\n") | ||||
|  | ||||
| 	// Print the CCC remap table. | ||||
| 	size += len(cccMap) | ||||
| 	fmt.Fprintf(w, "var ccc = [%d]uint8{", len(cccMap)) | ||||
| 	for i := 0; i < len(cccMap); i++ { | ||||
| 		if i%8 == 0 { | ||||
| 			fmt.Fprintln(w) | ||||
| 		} | ||||
| 		fmt.Fprintf(w, "%3d, ", cccMap[uint8(i)]) | ||||
| 	} | ||||
| 	fmt.Fprintln(w, "\n}\n") | ||||
|  | ||||
| 	if contains(list, "info") { | ||||
| 		size += printCharInfoTables(w) | ||||
| 	} | ||||
|  | ||||
| 	if contains(list, "recomp") { | ||||
| 		// Note that we use 32 bit keys, instead of 64 bit. | ||||
| 		// This clips the bits of three entries, but we know | ||||
| 		// this won't cause a collision. The compiler will catch | ||||
| 		// any changes made to UnicodeData.txt that introduces | ||||
| 		// a collision. | ||||
| 		// Note that the recomposition map for NFC and NFKC | ||||
| 		// are identical. | ||||
|  | ||||
| 		// Recomposition map | ||||
| 		nrentries := 0 | ||||
| 		for _, c := range chars { | ||||
| 			f := c.forms[FCanonical] | ||||
| 			if !f.isOneWay && len(f.decomp) > 0 { | ||||
| 				nrentries++ | ||||
| 			} | ||||
| 		} | ||||
| 		sz := nrentries * 8 | ||||
| 		size += sz | ||||
| 		fmt.Fprintf(w, "// recompMap: %d bytes (entries only)\n", sz) | ||||
| 		fmt.Fprintln(w, "var recompMap = map[uint32]rune{") | ||||
| 		for i, c := range chars { | ||||
| 			f := c.forms[FCanonical] | ||||
| 			d := f.decomp | ||||
| 			if !f.isOneWay && len(d) > 0 { | ||||
| 				key := uint32(uint16(d[0]))<<16 + uint32(uint16(d[1])) | ||||
| 				fmt.Fprintf(w, "0x%.8X: 0x%.4X,\n", key, i) | ||||
| 			} | ||||
| 		} | ||||
| 		fmt.Fprintf(w, "}\n\n") | ||||
| 	} | ||||
|  | ||||
| 	fmt.Fprintf(w, "// Total size of tables: %dKB (%d bytes)\n", (size+512)/1024, size) | ||||
| 	gen.WriteGoFile("tables.go", "norm", w.Bytes()) | ||||
| } | ||||
|  | ||||
| func printChars() { | ||||
| 	if *verbose { | ||||
| 		for _, c := range chars { | ||||
| 			if !c.isValid() || c.state == SMissing { | ||||
| 				continue | ||||
| 			} | ||||
| 			fmt.Println(c) | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
|  | ||||
// verifyComputed does various consistency tests on the computed per-rune
// normalization properties, aborting on any violation.
func verifyComputed() {
	for i, c := range chars {
		for _, f := range c.forms {
			// NF*D quick check must be No exactly when the rune decomposes;
			// Hangul is exempt (it decomposes algorithmically).
			isNo := (f.quickCheck[MDecomposed] == QCNo)
			if (len(f.decomp) > 0) != isNo && !isHangul(rune(i)) {
				log.Fatalf("%U: NF*D QC must be No if rune decomposes", i)
			}

			// NF*C Maybe must coincide with the combines-backward property.
			isMaybe := f.quickCheck[MComposed] == QCMaybe
			if f.combinesBackward != isMaybe {
				log.Fatalf("%U: NF*C QC must be Maybe if combinesBackward", i)
			}
			if len(f.decomp) > 0 && f.combinesForward && isMaybe {
				log.Fatalf("%U: NF*C QC must be Yes or No if combinesForward and decomposes", i)
			}

			if len(f.expandedDecomp) != 0 {
				continue
			}
			if a, b := c.nLeadingNonStarters > 0, (c.ccc > 0 || f.combinesBackward); a != b {
				// We accept these runes to be treated differently (it only affects
				// segment breaking in iteration, most likely on improper use), but
				// reconsider if more characters are added.
				// U+FF9E HALFWIDTH KATAKANA VOICED SOUND MARK;Lm;0;L;<narrow> 3099;;;;N;;;;;
				// U+FF9F HALFWIDTH KATAKANA SEMI-VOICED SOUND MARK;Lm;0;L;<narrow> 309A;;;;N;;;;;
				// U+3133 HANGUL LETTER KIYEOK-SIOS;Lo;0;L;<compat> 11AA;;;;N;HANGUL LETTER GIYEOG SIOS;;;;
				// U+318E HANGUL LETTER ARAEAE;Lo;0;L;<compat> 11A1;;;;N;HANGUL LETTER ALAE AE;;;;
				// U+FFA3 HALFWIDTH HANGUL LETTER KIYEOK-SIOS;Lo;0;L;<narrow> 3133;;;;N;HALFWIDTH HANGUL LETTER GIYEOG SIOS;;;;
				// U+FFDC HALFWIDTH HANGUL LETTER I;Lo;0;L;<narrow> 3163;;;;N;;;;;
				if i != 0xFF9E && i != 0xFF9F && !(0x3133 <= i && i <= 0x318E) && !(0xFFA3 <= i && i <= 0xFFDC) {
					log.Fatalf("%U: nLead was %v; want %v", i, a, b)
				}
			}
		}
		// The backward-combining property must agree between NFC and NFKC.
		nfc := c.forms[FCanonical]
		nfkc := c.forms[FCompatibility]
		if nfc.combinesBackward != nfkc.combinesBackward {
			log.Fatalf("%U: Cannot combine combinesBackward\n", c.codePoint)
		}
	}
}
|  | ||||
// Use values in DerivedNormalizationProps.txt to compare against the
// values we computed.
// DerivedNormalizationProps.txt has form:
// 00C0..00C5    ; NFD_QC; N # ...
// 0374          ; NFD_QC; N # ...
// See http://unicode.org/reports/tr44/ for full explanation
func testDerived() {
	f := gen.OpenUCDFile("DerivedNormalizationProps.txt")
	defer f.Close()
	p := ucd.New(f)
	for p.Next() {
		r := p.Rune(0)
		c := &chars[r]

		// Map the property name to our (form, mode) pair; skip any
		// property that is not one of the four quick-check values.
		var ftype, mode int
		qt := p.String(1)
		switch qt {
		case "NFC_QC":
			ftype, mode = FCanonical, MComposed
		case "NFD_QC":
			ftype, mode = FCanonical, MDecomposed
		case "NFKC_QC":
			ftype, mode = FCompatibility, MComposed
		case "NFKD_QC":
			ftype, mode = FCompatibility, MDecomposed
		default:
			continue
		}
		var qr QCResult
		switch p.String(2) {
		case "Y":
			qr = QCYes
		case "N":
			qr = QCNo
		case "M":
			qr = QCMaybe
		default:
			log.Fatalf(`Unexpected quick check value "%s"`, p.String(2))
		}
		// Report (but do not abort on) mismatches with the computed value.
		if got := c.forms[ftype].quickCheck[mode]; got != qr {
			log.Printf("%U: FAILED %s (was %v need %v)\n", r, qt, got, qr)
		}
		c.forms[ftype].verified[mode] = true
	}
	if err := p.Err(); err != nil {
		log.Fatal(err)
	}
	// Any unspecified value must be QCYes. Verify this.
	for i, c := range chars {
		for j, fd := range c.forms {
			for k, qr := range fd.quickCheck {
				if !fd.verified[k] && qr != QCYes {
					m := "%U: FAIL F:%d M:%d (was %v need Yes) %s\n"
					log.Printf(m, i, j, k, qr, c.name)
				}
			}
		}
	}
}
|  | ||||
// testHeader is the preamble of the generated data_test.go file; it is
// emitted verbatim by printTestdata, which then appends the testData
// entries and the closing brace. Do not reformat the literal.
var testHeader = `const (
	Yes = iota
	No
	Maybe
)

type formData struct {
	qc              uint8
	combinesForward bool
	decomposition   string
}

type runeData struct {
	r      rune
	ccc    uint8
	nLead  uint8
	nTrail uint8
	f      [2]formData // 0: canonical; 1: compatibility
}

func f(qc uint8, cf bool, dec string) [2]formData {
	return [2]formData{{qc, cf, dec}, {qc, cf, dec}}
}

func g(qc, qck uint8, cf, cfk bool, d, dk string) [2]formData {
	return [2]formData{{qc, cf, d}, {qck, cfk, dk}}
}

var testData = []runeData{
`
|  | ||||
| func printTestdata() { | ||||
| 	type lastInfo struct { | ||||
| 		ccc    uint8 | ||||
| 		nLead  uint8 | ||||
| 		nTrail uint8 | ||||
| 		f      string | ||||
| 	} | ||||
|  | ||||
| 	last := lastInfo{} | ||||
| 	w := &bytes.Buffer{} | ||||
| 	fmt.Fprintf(w, testHeader) | ||||
| 	for r, c := range chars { | ||||
| 		f := c.forms[FCanonical] | ||||
| 		qc, cf, d := f.quickCheck[MComposed], f.combinesForward, string(f.expandedDecomp) | ||||
| 		f = c.forms[FCompatibility] | ||||
| 		qck, cfk, dk := f.quickCheck[MComposed], f.combinesForward, string(f.expandedDecomp) | ||||
| 		s := "" | ||||
| 		if d == dk && qc == qck && cf == cfk { | ||||
| 			s = fmt.Sprintf("f(%s, %v, %q)", qc, cf, d) | ||||
| 		} else { | ||||
| 			s = fmt.Sprintf("g(%s, %s, %v, %v, %q, %q)", qc, qck, cf, cfk, d, dk) | ||||
| 		} | ||||
| 		current := lastInfo{c.ccc, c.nLeadingNonStarters, c.nTrailingNonStarters, s} | ||||
| 		if last != current { | ||||
| 			fmt.Fprintf(w, "\t{0x%x, %d, %d, %d, %s},\n", r, c.origCCC, c.nLeadingNonStarters, c.nTrailingNonStarters, s) | ||||
| 			last = current | ||||
| 		} | ||||
| 	} | ||||
| 	fmt.Fprintln(w, "}") | ||||
| 	gen.WriteGoFile("data_test.go", "norm", w.Bytes()) | ||||
| } | ||||
							
								
								
									
										609
									
								
								vendor/golang.org/x/text/unicode/norm/normalize.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										609
									
								
								vendor/golang.org/x/text/unicode/norm/normalize.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,609 @@ | ||||
| // Copyright 2011 The Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| // Note: the file data_test.go that is generated should not be checked in. | ||||
| //go:generate go run maketables.go triegen.go | ||||
| //go:generate go test -tags test | ||||
|  | ||||
| // Package norm contains types and functions for normalizing Unicode strings. | ||||
| package norm // import "golang.org/x/text/unicode/norm" | ||||
|  | ||||
| import ( | ||||
| 	"unicode/utf8" | ||||
|  | ||||
| 	"golang.org/x/text/transform" | ||||
| ) | ||||
|  | ||||
// A Form denotes a canonical representation of Unicode code points.
// The Unicode-defined normalization and equivalence forms are:
//
//   NFC   Unicode Normalization Form C
//   NFD   Unicode Normalization Form D
//   NFKC  Unicode Normalization Form KC
//   NFKD  Unicode Normalization Form KD
//
// For a Form f, this documentation uses the notation f(x) to mean
// the bytes or string x converted to the given form.
// A position n in x is called a boundary if conversion to the form can
// proceed independently on both sides:
//   f(x) == append(f(x[0:n]), f(x[n:])...)
//
// References: http://unicode.org/reports/tr15/ and
// http://unicode.org/notes/tn5/.
type Form int

// The supported normalization forms. The values double as indexes into
// formTable, so their order must match that table.
const (
	NFC Form = iota
	NFD
	NFKC
	NFKD
)
|  | ||||
| // Bytes returns f(b). May return b if f(b) = b. | ||||
| func (f Form) Bytes(b []byte) []byte { | ||||
| 	src := inputBytes(b) | ||||
| 	ft := formTable[f] | ||||
| 	n, ok := ft.quickSpan(src, 0, len(b), true) | ||||
| 	if ok { | ||||
| 		return b | ||||
| 	} | ||||
| 	out := make([]byte, n, len(b)) | ||||
| 	copy(out, b[0:n]) | ||||
| 	rb := reorderBuffer{f: *ft, src: src, nsrc: len(b), out: out, flushF: appendFlush} | ||||
| 	return doAppendInner(&rb, n) | ||||
| } | ||||
|  | ||||
| // String returns f(s). | ||||
| func (f Form) String(s string) string { | ||||
| 	src := inputString(s) | ||||
| 	ft := formTable[f] | ||||
| 	n, ok := ft.quickSpan(src, 0, len(s), true) | ||||
| 	if ok { | ||||
| 		return s | ||||
| 	} | ||||
| 	out := make([]byte, n, len(s)) | ||||
| 	copy(out, s[0:n]) | ||||
| 	rb := reorderBuffer{f: *ft, src: src, nsrc: len(s), out: out, flushF: appendFlush} | ||||
| 	return string(doAppendInner(&rb, n)) | ||||
| } | ||||
|  | ||||
// IsNormal returns true if b == f(b).
func (f Form) IsNormal(b []byte) bool {
	src := inputBytes(b)
	ft := formTable[f]
	// quickSpan verifies the longest prefix that is certainly normalized.
	bp, ok := ft.quickSpan(src, 0, len(b), true)
	if ok {
		return true
	}
	rb := reorderBuffer{f: *ft, src: src, nsrc: len(b)}
	rb.setFlusher(nil, cmpNormalBytes)
	for bp < len(b) {
		// Alias rb.out to the unverified tail so cmpNormalBytes can compare
		// the normalized runes directly against the input bytes.
		rb.out = b[bp:]
		// A negative position signals a failed flush, i.e. a mismatch.
		if bp = decomposeSegment(&rb, bp, true); bp < 0 {
			return false
		}
		bp, _ = rb.f.quickSpan(rb.src, bp, len(b), true)
	}
	return true
}
|  | ||||
| func cmpNormalBytes(rb *reorderBuffer) bool { | ||||
| 	b := rb.out | ||||
| 	for i := 0; i < rb.nrune; i++ { | ||||
| 		info := rb.rune[i] | ||||
| 		if int(info.size) > len(b) { | ||||
| 			return false | ||||
| 		} | ||||
| 		p := info.pos | ||||
| 		pe := p + info.size | ||||
| 		for ; p < pe; p++ { | ||||
| 			if b[0] != rb.byte[p] { | ||||
| 				return false | ||||
| 			} | ||||
| 			b = b[1:] | ||||
| 		} | ||||
| 	} | ||||
| 	return true | ||||
| } | ||||
|  | ||||
// IsNormalString returns true if s == f(s).
func (f Form) IsNormalString(s string) bool {
	src := inputString(s)
	ft := formTable[f]
	bp, ok := ft.quickSpan(src, 0, len(s), true)
	if ok {
		return true
	}
	rb := reorderBuffer{f: *ft, src: src, nsrc: len(s)}
	// The flusher compares the normalized runes against s starting at bp;
	// bp is captured by the closure and advanced as bytes match.
	rb.setFlusher(nil, func(rb *reorderBuffer) bool {
		for i := 0; i < rb.nrune; i++ {
			info := rb.rune[i]
			if bp+int(info.size) > len(s) {
				return false
			}
			p := info.pos
			pe := p + info.size
			for ; p < pe; p++ {
				if s[bp] != rb.byte[p] {
					return false
				}
				bp++
			}
		}
		return true
	})
	for bp < len(s) {
		// A negative position signals a failed flush, i.e. a mismatch.
		if bp = decomposeSegment(&rb, bp, true); bp < 0 {
			return false
		}
		bp, _ = rb.f.quickSpan(rb.src, bp, len(s), true)
	}
	return true
}
|  | ||||
// patchTail fixes a case where a rune may be incorrectly normalized
// if it is followed by illegal continuation bytes. It returns the
// patched buffer and whether the decomposition is still in progress.
func patchTail(rb *reorderBuffer) bool {
	// Locate the start of the last rune already in the output buffer.
	info, p := lastRuneStart(&rb.f, rb.out)
	if p == -1 || info.size == 0 {
		return true
	}
	end := p + int(info.size)
	extra := len(rb.out) - end
	if extra > 0 {
		// The last rune is followed by stray (illegal) continuation bytes.
		// Potentially allocating memory. However, this only
		// happens with ill-formed UTF-8.
		x := make([]byte, 0)
		x = append(x, rb.out[len(rb.out)-extra:]...)
		rb.out = rb.out[:end]
		decomposeToLastBoundary(rb)
		rb.doFlush()
		rb.out = append(rb.out, x...)
		return false
	}
	// Pull the final rune off the output and re-insert it into the reorder
	// buffer so it can participate in reordering with what follows.
	buf := rb.out[p:]
	rb.out = rb.out[:p]
	decomposeToLastBoundary(rb)
	if s := rb.ss.next(info); s == ssStarter {
		// The rune starts a new segment; flush everything before it.
		rb.doFlush()
		rb.ss.first(info)
	} else if s == ssOverflow {
		// Too many non-starters for Stream-Safe Text: flush and insert a CGJ.
		rb.doFlush()
		rb.insertCGJ()
		rb.ss = 0
	}
	rb.insertUnsafe(inputBytes(buf), 0, info)
	return true
}
|  | ||||
| func appendQuick(rb *reorderBuffer, i int) int { | ||||
| 	if rb.nsrc == i { | ||||
| 		return i | ||||
| 	} | ||||
| 	end, _ := rb.f.quickSpan(rb.src, i, rb.nsrc, true) | ||||
| 	rb.out = rb.src.appendSlice(rb.out, i, end) | ||||
| 	return end | ||||
| } | ||||
|  | ||||
// Append returns f(append(out, b...)).
// The buffer out must be nil, empty, or equal to f(out).
func (f Form) Append(out []byte, src ...byte) []byte {
	// Wrap the variadic bytes as an input and delegate to doAppend.
	return f.doAppend(out, inputBytes(src), len(src))
}
|  | ||||
// doAppend appends the normalization of the first n bytes of src to out.
// An empty out enables a fast path that skips merging at the boundary.
func (f Form) doAppend(out []byte, src input, n int) []byte {
	if n == 0 {
		return out
	}
	ft := formTable[f]
	// Attempt to do a quickSpan first so we can avoid initializing the reorderBuffer.
	if len(out) == 0 {
		p, _ := ft.quickSpan(src, 0, n, true)
		out = src.appendSlice(out, 0, p)
		if p == n {
			return out
		}
		rb := reorderBuffer{f: *ft, src: src, nsrc: n, out: out, flushF: appendFlush}
		return doAppendInner(&rb, p)
	}
	// out is non-empty: doAppend handles merging at the out/src seam.
	rb := reorderBuffer{f: *ft, src: src, nsrc: n}
	return doAppend(&rb, out, 0)
}
|  | ||||
// doAppend appends the normalization of rb.src[p:] to out, taking care of
// merging with a possibly open segment at the end of out.
func doAppend(rb *reorderBuffer, out []byte, p int) []byte {
	rb.setFlusher(out, appendFlush)
	src, n := rb.src, rb.nsrc
	doMerge := len(out) > 0
	if q := src.skipContinuationBytes(p); q > p {
		// Move leading non-starters to destination.
		rb.out = src.appendSlice(rb.out, p, q)
		p = q
		doMerge = patchTail(rb)
	}
	fd := &rb.f
	if doMerge {
		var info Properties
		if p < n {
			info = fd.info(src, p)
			if !info.BoundaryBefore() || info.nLeadingNonStarters() > 0 {
				// The next rune may interact with the tail of out:
				// decompose across the seam.
				if p == 0 {
					decomposeToLastBoundary(rb)
				}
				p = decomposeSegment(rb, p, true)
			}
		}
		if info.size == 0 {
			rb.doFlush()
			// Append incomplete UTF-8 encoding.
			return src.appendSlice(rb.out, p, n)
		}
		if rb.nrune > 0 {
			// The reorder buffer still holds runes from the merge; continue
			// on the slow path.
			return doAppendInner(rb, p)
		}
	}
	p = appendQuick(rb, p)
	return doAppendInner(rb, p)
}
|  | ||||
| func doAppendInner(rb *reorderBuffer, p int) []byte { | ||||
| 	for n := rb.nsrc; p < n; { | ||||
| 		p = decomposeSegment(rb, p, true) | ||||
| 		p = appendQuick(rb, p) | ||||
| 	} | ||||
| 	return rb.out | ||||
| } | ||||
|  | ||||
| // AppendString returns f(append(out, []byte(s))). | ||||
| // The buffer out must be nil, empty, or equal to f(out). | ||||
| func (f Form) AppendString(out []byte, src string) []byte { | ||||
| 	return f.doAppend(out, inputString(src), len(src)) | ||||
| } | ||||
|  | ||||
| // QuickSpan returns a boundary n such that b[0:n] == f(b[0:n]). | ||||
| // It is not guaranteed to return the largest such n. | ||||
| func (f Form) QuickSpan(b []byte) int { | ||||
| 	n, _ := formTable[f].quickSpan(inputBytes(b), 0, len(b), true) | ||||
| 	return n | ||||
| } | ||||
|  | ||||
| // Span implements transform.SpanningTransformer. It returns a boundary n such | ||||
| // that b[0:n] == f(b[0:n]). It is not guaranteed to return the largest such n. | ||||
| func (f Form) Span(b []byte, atEOF bool) (n int, err error) { | ||||
| 	n, ok := formTable[f].quickSpan(inputBytes(b), 0, len(b), atEOF) | ||||
| 	if n < len(b) { | ||||
| 		if !ok { | ||||
| 			err = transform.ErrEndOfSpan | ||||
| 		} else { | ||||
| 			err = transform.ErrShortSrc | ||||
| 		} | ||||
| 	} | ||||
| 	return n, err | ||||
| } | ||||
|  | ||||
| // SpanString returns a boundary n such that s[0:n] == f(s[0:n]). | ||||
| // It is not guaranteed to return the largest such n. | ||||
| func (f Form) SpanString(s string, atEOF bool) (n int, err error) { | ||||
| 	n, ok := formTable[f].quickSpan(inputString(s), 0, len(s), atEOF) | ||||
| 	if n < len(s) { | ||||
| 		if !ok { | ||||
| 			err = transform.ErrEndOfSpan | ||||
| 		} else { | ||||
| 			err = transform.ErrShortSrc | ||||
| 		} | ||||
| 	} | ||||
| 	return n, err | ||||
| } | ||||
|  | ||||
// quickSpan returns a boundary n such that src[0:n] == f(src[0:n]) and
// whether any non-normalized parts were found. If atEOF is false, n will
// not point past the last segment if this segment might become
// non-normalized by appending other runes.
func (f *formInfo) quickSpan(src input, i, end int, atEOF bool) (n int, ok bool) {
	var lastCC uint8
	ss := streamSafe(0)
	lastSegStart := i
	for n = end; i < n; {
		if j := src.skipASCII(i, n); i != j {
			// ASCII is always normalized and each byte is a boundary.
			i = j
			lastSegStart = i - 1
			lastCC = 0
			ss = 0
			continue
		}
		info := f.info(src, i)
		if info.size == 0 {
			if atEOF {
				// include incomplete runes
				return n, true
			}
			return lastSegStart, true
		}
		// This block needs to be before the next, because it is possible to
		// have an overflow for runes that are starters (e.g. with U+FF9E).
		switch ss.next(info) {
		case ssStarter:
			lastSegStart = i
		case ssOverflow:
			// A CGJ would have to be inserted here, so this is not normalized.
			return lastSegStart, false
		case ssSuccess:
			if lastCC > info.ccc {
				// Combining classes out of order: not normalized.
				return lastSegStart, false
			}
		}
		if f.composing {
			if !info.isYesC() {
				break
			}
		} else {
			if !info.isYesD() {
				break
			}
		}
		lastCC = info.ccc
		i += int(info.size)
	}
	if i == n {
		if !atEOF {
			// The last segment could still change with more input; exclude it.
			n = lastSegStart
		}
		return n, true
	}
	// A rune with a "no" or "maybe" quick-check property was found.
	return lastSegStart, false
}
|  | ||||
| // QuickSpanString returns a boundary n such that s[0:n] == f(s[0:n]). | ||||
| // It is not guaranteed to return the largest such n. | ||||
| func (f Form) QuickSpanString(s string) int { | ||||
| 	n, _ := formTable[f].quickSpan(inputString(s), 0, len(s), true) | ||||
| 	return n | ||||
| } | ||||
|  | ||||
| // FirstBoundary returns the position i of the first boundary in b | ||||
| // or -1 if b contains no boundary. | ||||
| func (f Form) FirstBoundary(b []byte) int { | ||||
| 	return f.firstBoundary(inputBytes(b), len(b)) | ||||
| } | ||||
|  | ||||
// firstBoundary returns the position of the first boundary in the first nsrc
// bytes of src, or -1 if none is found.
func (f Form) firstBoundary(src input, nsrc int) int {
	i := src.skipContinuationBytes(0)
	if i >= nsrc {
		return -1
	}
	fd := formTable[f]
	ss := streamSafe(0)
	// We should call ss.first here, but we can't as the first rune is
	// skipped already. This means FirstBoundary can't really determine
	// CGJ insertion points correctly. Luckily it doesn't have to.
	for {
		info := fd.info(src, i)
		if info.size == 0 {
			// Incomplete or ill-formed UTF-8: no boundary found.
			return -1
		}
		if s := ss.next(info); s != ssSuccess {
			// A starter (or stream-safe overflow) marks a boundary.
			return i
		}
		i += int(info.size)
		if i >= nsrc {
			if !info.BoundaryAfter() && !ss.isMax() {
				return -1
			}
			return nsrc
		}
	}
}
|  | ||||
| // FirstBoundaryInString returns the position i of the first boundary in s | ||||
| // or -1 if s contains no boundary. | ||||
| func (f Form) FirstBoundaryInString(s string) int { | ||||
| 	return f.firstBoundary(inputString(s), len(s)) | ||||
| } | ||||
|  | ||||
| // NextBoundary reports the index of the boundary between the first and next | ||||
| // segment in b or -1 if atEOF is false and there are not enough bytes to | ||||
| // determine this boundary. | ||||
| func (f Form) NextBoundary(b []byte, atEOF bool) int { | ||||
| 	return f.nextBoundary(inputBytes(b), len(b), atEOF) | ||||
| } | ||||
|  | ||||
| // NextBoundaryInString reports the index of the boundary between the first and | ||||
| // next segment in b or -1 if atEOF is false and there are not enough bytes to | ||||
| // determine this boundary. | ||||
| func (f Form) NextBoundaryInString(s string, atEOF bool) int { | ||||
| 	return f.nextBoundary(inputString(s), len(s), atEOF) | ||||
| } | ||||
|  | ||||
// nextBoundary is the shared implementation of NextBoundary and
// NextBoundaryInString.
func (f Form) nextBoundary(src input, nsrc int, atEOF bool) int {
	if nsrc == 0 {
		if atEOF {
			return 0
		}
		return -1
	}
	fd := formTable[f]
	info := fd.info(src, 0)
	if info.size == 0 {
		if atEOF {
			// Treat a leading ill-formed byte as its own segment.
			return 1
		}
		return -1
	}
	ss := streamSafe(0)
	ss.first(info)

	for i := int(info.size); i < nsrc; i += int(info.size) {
		info = fd.info(src, i)
		if info.size == 0 {
			if atEOF {
				return i
			}
			return -1
		}
		// TODO: Using streamSafe to determine the boundary isn't the same as
		// using BoundaryBefore. Determine which should be used.
		if s := ss.next(info); s != ssSuccess {
			return i
		}
	}
	if !atEOF && !info.BoundaryAfter() && !ss.isMax() {
		// More input could still extend the current segment.
		return -1
	}
	return nsrc
}
|  | ||||
| // LastBoundary returns the position i of the last boundary in b | ||||
| // or -1 if b contains no boundary. | ||||
| func (f Form) LastBoundary(b []byte) int { | ||||
| 	return lastBoundary(formTable[f], b) | ||||
| } | ||||
|  | ||||
// lastBoundary returns the position of the last boundary in b, or -1 if no
// boundary exists, by scanning runes backwards from the end.
func lastBoundary(fd *formInfo, b []byte) int {
	i := len(b)
	info, p := lastRuneStart(fd, b)
	if p == -1 {
		return -1
	}
	if info.size == 0 { // ends with incomplete rune
		if p == 0 { // starts with incomplete rune
			return -1
		}
		i = p
		info, p = lastRuneStart(fd, b[:i])
		if p == -1 { // incomplete UTF-8 encoding or non-starter bytes without a starter
			return i
		}
	}
	if p+int(info.size) != i { // trailing non-starter bytes: illegal UTF-8
		return i
	}
	if info.BoundaryAfter() {
		return i
	}
	ss := streamSafe(0)
	v := ss.backwards(info)
	// Walk backwards over non-starters until a starter (boundary) is found
	// or the stream-safe limit is exceeded.
	for i = p; i >= 0 && v != ssStarter; i = p {
		info, p = lastRuneStart(fd, b[:i])
		if v = ss.backwards(info); v == ssOverflow {
			break
		}
		if p+int(info.size) != i {
			if p == -1 { // no boundary found
				return -1
			}
			return i // boundary after an illegal UTF-8 encoding
		}
	}
	return i
}
|  | ||||
// decomposeSegment scans the first segment in src into rb. It inserts 0x034f
// (Grapheme Joiner) when it encounters a sequence of more than 30 non-starters
// and returns the number of bytes consumed from src or iShortDst or iShortSrc.
func decomposeSegment(rb *reorderBuffer, sp int, atEOF bool) int {
	// Force one character to be consumed.
	info := rb.f.info(rb.src, sp)
	if info.size == 0 {
		return 0
	}
	if s := rb.ss.next(info); s == ssStarter {
		// TODO: this could be removed if we don't support merging.
		if rb.nrune > 0 {
			goto end
		}
	} else if s == ssOverflow {
		// Stream-safe limit reached: emit a CGJ before this rune.
		rb.insertCGJ()
		goto end
	}
	if err := rb.insertFlush(rb.src, sp, info); err != iSuccess {
		return int(err)
	}
	// Consume runes until the start of the next segment.
	for {
		sp += int(info.size)
		if sp >= rb.nsrc {
			if !atEOF && !info.BoundaryAfter() {
				// More input may still belong to this segment.
				return int(iShortSrc)
			}
			break
		}
		info = rb.f.info(rb.src, sp)
		if info.size == 0 {
			if !atEOF {
				return int(iShortSrc)
			}
			break
		}
		if s := rb.ss.next(info); s == ssStarter {
			break
		} else if s == ssOverflow {
			rb.insertCGJ()
			break
		}
		if err := rb.insertFlush(rb.src, sp, info); err != iSuccess {
			return int(err)
		}
	}
end:
	if !rb.doFlush() {
		return int(iShortDst)
	}
	return sp
}
|  | ||||
| // lastRuneStart returns the runeInfo and position of the last | ||||
| // rune in buf or the zero runeInfo and -1 if no rune was found. | ||||
| func lastRuneStart(fd *formInfo, buf []byte) (Properties, int) { | ||||
| 	p := len(buf) - 1 | ||||
| 	for ; p >= 0 && !utf8.RuneStart(buf[p]); p-- { | ||||
| 	} | ||||
| 	if p < 0 { | ||||
| 		return Properties{}, -1 | ||||
| 	} | ||||
| 	return fd.info(inputBytes(buf), p), p | ||||
| } | ||||
|  | ||||
// decomposeToLastBoundary finds an open segment at the end of the buffer
// and scans it into rb. Returns the buffer minus the last segment.
func decomposeToLastBoundary(rb *reorderBuffer) {
	fd := &rb.f
	info, i := lastRuneStart(fd, rb.out)
	if int(info.size) != len(rb.out)-i {
		// illegal trailing continuation bytes
		return
	}
	if info.BoundaryAfter() {
		return
	}
	var add [maxNonStarters + 1]Properties // stores runeInfo in reverse order
	padd := 0
	ss := streamSafe(0)
	p := len(rb.out)
	// Collect the runes of the trailing open segment, walking backwards.
	for {
		add[padd] = info
		v := ss.backwards(info)
		if v == ssOverflow {
			// Note that if we have an overflow, the string we are appending to
			// is not correctly normalized. In this case the behavior is undefined.
			break
		}
		padd++
		p -= int(info.size)
		if v == ssStarter || p < 0 {
			break
		}
		info, i = lastRuneStart(fd, rb.out[:p])
		if int(info.size) != p-i {
			break
		}
	}
	rb.ss = ss
	// Copy bytes for insertion as we may need to overwrite rb.out.
	var buf [maxBufferSize * utf8.UTFMax]byte
	cp := buf[:copy(buf[:], rb.out[p:])]
	rb.out = rb.out[:p]
	// Re-insert the collected runes into rb in forward order.
	for padd--; padd >= 0; padd-- {
		info = add[padd]
		rb.insertUnsafe(inputBytes(cp), 0, info)
		cp = cp[info.size:]
	}
}
							
								
								
									
										125
									
								
								vendor/golang.org/x/text/unicode/norm/readwriter.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										125
									
								
								vendor/golang.org/x/text/unicode/norm/readwriter.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,125 @@ | ||||
| // Copyright 2011 The Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| package norm | ||||
|  | ||||
| import "io" | ||||
|  | ||||
// normWriter is an io.WriteCloser that normalizes data on its way to w.
type normWriter struct {
	rb  reorderBuffer // scratch buffer used to normalize each chunk
	w   io.Writer     // underlying destination
	buf []byte        // normalized bytes not yet flushed (trailing open segment)
}
|  | ||||
| // Write implements the standard write interface.  If the last characters are | ||||
| // not at a normalization boundary, the bytes will be buffered for the next | ||||
| // write. The remaining bytes will be written on close. | ||||
| func (w *normWriter) Write(data []byte) (n int, err error) { | ||||
| 	// Process data in pieces to keep w.buf size bounded. | ||||
| 	const chunk = 4000 | ||||
|  | ||||
| 	for len(data) > 0 { | ||||
| 		// Normalize into w.buf. | ||||
| 		m := len(data) | ||||
| 		if m > chunk { | ||||
| 			m = chunk | ||||
| 		} | ||||
| 		w.rb.src = inputBytes(data[:m]) | ||||
| 		w.rb.nsrc = m | ||||
| 		w.buf = doAppend(&w.rb, w.buf, 0) | ||||
| 		data = data[m:] | ||||
| 		n += m | ||||
|  | ||||
| 		// Write out complete prefix, save remainder. | ||||
| 		// Note that lastBoundary looks back at most 31 runes. | ||||
| 		i := lastBoundary(&w.rb.f, w.buf) | ||||
| 		if i == -1 { | ||||
| 			i = 0 | ||||
| 		} | ||||
| 		if i > 0 { | ||||
| 			if _, err = w.w.Write(w.buf[:i]); err != nil { | ||||
| 				break | ||||
| 			} | ||||
| 			bn := copy(w.buf, w.buf[i:]) | ||||
| 			w.buf = w.buf[:bn] | ||||
| 		} | ||||
| 	} | ||||
| 	return n, err | ||||
| } | ||||
|  | ||||
| // Close forces data that remains in the buffer to be written. | ||||
| func (w *normWriter) Close() error { | ||||
| 	if len(w.buf) > 0 { | ||||
| 		_, err := w.w.Write(w.buf) | ||||
| 		if err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
// Writer returns a new writer that implements Write(b)
// by writing f(b) to w. The returned writer may use an
// internal buffer to maintain state across Write calls.
// Calling its Close method writes any buffered data to w.
func (f Form) Writer(w io.Writer) io.WriteCloser {
	wr := &normWriter{rb: reorderBuffer{}, w: w}
	wr.rb.init(f, nil)
	return wr
}
|  | ||||
// normReader is an io.Reader that returns the normalization of data read
// from r.
type normReader struct {
	rb           reorderBuffer // scratch buffer used to normalize each chunk
	r            io.Reader     // underlying source
	inbuf        []byte        // raw chunk read from r
	outbuf       []byte        // normalized output not yet returned
	bufStart     int           // start of unread data in outbuf
	lastBoundary int           // end of outbuf data that is safe to return
	err          error         // deferred error from the underlying reader
}
|  | ||||
// Read implements the standard read interface.
func (r *normReader) Read(p []byte) (int, error) {
	for {
		// First drain buffered output up to the last known boundary.
		if r.lastBoundary-r.bufStart > 0 {
			n := copy(p, r.outbuf[r.bufStart:r.lastBoundary])
			r.bufStart += n
			if r.lastBoundary-r.bufStart > 0 {
				return n, nil
			}
			// Buffer fully drained: also surface any deferred read error.
			return n, r.err
		}
		if r.err != nil {
			return 0, r.err
		}
		// Move the unfinished (open) segment to the front of outbuf.
		outn := copy(r.outbuf, r.outbuf[r.lastBoundary:])
		r.outbuf = r.outbuf[0:outn]
		r.bufStart = 0

		n, err := r.r.Read(r.inbuf)
		r.rb.src = inputBytes(r.inbuf[0:n])
		r.rb.nsrc, r.err = n, err
		if n > 0 {
			r.outbuf = doAppend(&r.rb, r.outbuf, 0)
		}
		if err == io.EOF {
			// No more input: everything buffered is final.
			r.lastBoundary = len(r.outbuf)
		} else {
			r.lastBoundary = lastBoundary(&r.rb.f, r.outbuf)
			if r.lastBoundary == -1 {
				r.lastBoundary = 0
			}
		}
	}
}
|  | ||||
| // Reader returns a new reader that implements Read | ||||
| // by reading data from r and returning f(data). | ||||
| func (f Form) Reader(r io.Reader) io.Reader { | ||||
| 	const chunk = 4000 | ||||
| 	buf := make([]byte, chunk) | ||||
| 	rr := &normReader{rb: reorderBuffer{}, r: r, inbuf: buf} | ||||
| 	rr.rb.init(f, buf) | ||||
| 	return rr | ||||
| } | ||||
							
								
								
									
										7631
									
								
								vendor/golang.org/x/text/unicode/norm/tables.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										7631
									
								
								vendor/golang.org/x/text/unicode/norm/tables.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
										
											
												File diff suppressed because it is too large
												Load Diff
											
										
									
								
							
							
								
								
									
										88
									
								
								vendor/golang.org/x/text/unicode/norm/transform.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										88
									
								
								vendor/golang.org/x/text/unicode/norm/transform.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,88 @@ | ||||
| // Copyright 2013 The Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| package norm | ||||
|  | ||||
| import ( | ||||
| 	"unicode/utf8" | ||||
|  | ||||
| 	"golang.org/x/text/transform" | ||||
| ) | ||||
|  | ||||
// Reset implements the Reset method of the transform.Transformer interface.
// It is a no-op: a Form value itself holds no state to reset.
func (Form) Reset() {}
|  | ||||
// Transform implements the Transform method of the transform.Transformer
// interface. It may need to write segments of up to MaxSegmentSize at once.
// Users should either catch ErrShortDst and allow dst to grow or have dst be at
// least of size MaxTransformChunkSize to be guaranteed of progress.
func (f Form) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
	n := 0
	// Cap the maximum number of src bytes to check.
	b := src
	eof := atEOF
	if ns := len(dst); ns < len(b) {
		// dst cannot hold all of src: tentatively report ErrShortDst; the
		// slow path below may override this.
		err = transform.ErrShortDst
		eof = false
		b = b[:ns]
	}
	i, ok := formTable[f].quickSpan(inputBytes(b), n, len(b), eof)
	n += copy(dst[n:], b[n:i])
	if !ok {
		// The remainder is not in normal form: fall back to the slow path.
		nDst, nSrc, err = f.transform(dst[n:], src[n:], atEOF)
		return nDst + n, nSrc + n, err
	}
	if n < len(src) && !atEOF {
		err = transform.ErrShortSrc
	}
	return n, n, err
}
|  | ||||
| func flushTransform(rb *reorderBuffer) bool { | ||||
| 	// Write out (must fully fit in dst, or else it is a ErrShortDst). | ||||
| 	if len(rb.out) < rb.nrune*utf8.UTFMax { | ||||
| 		return false | ||||
| 	} | ||||
| 	rb.out = rb.out[rb.flushCopy(rb.out):] | ||||
| 	return true | ||||
| } | ||||
|  | ||||
| var errs = []error{nil, transform.ErrShortDst, transform.ErrShortSrc} | ||||
|  | ||||
// transform implements the transform.Transformer interface. It is only called
// when quickSpan does not pass for a given string.
func (f Form) transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
	// TODO: get rid of reorderBuffer. See CL 23460044.
	rb := reorderBuffer{}
	rb.init(f, src)
	for {
		// Load segment into reorder buffer.
		rb.setFlusher(dst[nDst:], flushTransform)
		end := decomposeSegment(&rb, nSrc, atEOF)
		if end < 0 {
			// Negative results encode iShortDst/iShortSrc; map via errs.
			return nDst, nSrc, errs[-end]
		}
		nDst = len(dst) - len(rb.out)
		nSrc = end

		// Next quickSpan.
		end = rb.nsrc
		eof := atEOF
		if n := nSrc + len(dst) - nDst; n < end {
			// Not enough room left in dst for the remaining input.
			err = transform.ErrShortDst
			end = n
			eof = false
		}
		end, ok := rb.f.quickSpan(rb.src, nSrc, end, eof)
		n := copy(dst[nDst:], rb.src.bytes[nSrc:end])
		nSrc += n
		nDst += n
		if ok {
			if n < rb.nsrc && !atEOF {
				err = transform.ErrShortSrc
			}
			return nDst, nSrc, err
		}
	}
}
							
								
								
									
										54
									
								
								vendor/golang.org/x/text/unicode/norm/trie.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										54
									
								
								vendor/golang.org/x/text/unicode/norm/trie.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,54 @@ | ||||
| // Copyright 2011 The Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| package norm | ||||
|  | ||||
// valueRange is one entry of a sparse block. The first entry of a block is a
// header: its value field holds the stride and its lo field the number of
// ranges that follow (see lookup).
type valueRange struct {
	value  uint16 // header: value:stride
	lo, hi byte   // header: lo:n
}
|  | ||||
// sparseBlocks holds the sparse-block value ranges of a trie together with,
// per block, the offset of that block's header within values.
type sparseBlocks struct {
	values []valueRange
	offset []uint16
}
|  | ||||
// nfcSparse and nfkcSparse expose the generated sparse tables for the NFC
// and NFKC tries.
var nfcSparse = sparseBlocks{
	values: nfcSparseValues[:],
	offset: nfcSparseOffset[:],
}

var nfkcSparse = sparseBlocks{
	values: nfkcSparseValues[:],
	offset: nfkcSparseOffset[:],
}

// nfcData and nfkcData are the generated tries themselves.
var (
	nfcData  = newNfcTrie(0)
	nfkcData = newNfkcTrie(0)
)
|  | ||||
| // lookupValue determines the type of block n and looks up the value for b. | ||||
| // For n < t.cutoff, the block is a simple lookup table. Otherwise, the block | ||||
| // is a list of ranges with an accompanying value. Given a matching range r, | ||||
| // the value for b is by r.value + (b - r.lo) * stride. | ||||
| func (t *sparseBlocks) lookup(n uint32, b byte) uint16 { | ||||
| 	offset := t.offset[n] | ||||
| 	header := t.values[offset] | ||||
| 	lo := offset + 1 | ||||
| 	hi := lo + uint16(header.lo) | ||||
| 	for lo < hi { | ||||
| 		m := lo + (hi-lo)/2 | ||||
| 		r := t.values[m] | ||||
| 		if r.lo <= b && b <= r.hi { | ||||
| 			return r.value + uint16(b-r.lo)*header.value | ||||
| 		} | ||||
| 		if b < r.lo { | ||||
| 			hi = m | ||||
| 		} else { | ||||
| 			lo = m + 1 | ||||
| 		} | ||||
| 	} | ||||
| 	return 0 | ||||
| } | ||||
							
								
								
									
										117
									
								
								vendor/golang.org/x/text/unicode/norm/triegen.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										117
									
								
								vendor/golang.org/x/text/unicode/norm/triegen.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,117 @@ | ||||
| // Copyright 2011 The Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| // +build ignore | ||||
|  | ||||
| // Trie table generator. | ||||
| // Used by make*tables tools to generate a go file with trie data structures | ||||
| // for mapping UTF-8 to a 16-bit value. All but the last byte in a UTF-8 byte | ||||
| // sequence are used to lookup offsets in the index table to be used for the | ||||
| // next byte. The last byte is used to index into a table with 16-bit values. | ||||
|  | ||||
| package main | ||||
|  | ||||
| import ( | ||||
| 	"fmt" | ||||
| 	"io" | ||||
| ) | ||||
|  | ||||
| const maxSparseEntries = 16 | ||||
|  | ||||
// normCompacter compacts trie blocks with few distinct entries into a
// sparse-range representation.
type normCompacter struct {
	sparseBlocks [][]uint64 // blocks stored for sparse representation
	sparseOffset []uint16   // per-block offset into the emitted values table
	sparseCount  int        // total number of value entries emitted so far
	name         string     // table name prefix, e.g. "nfc" or "nfkc"
}
|  | ||||
// mostFrequentStride returns the difference between successive values of a
// that occurs most often, counting only non-negative differences whose
// predecessor is non-zero. Ties are broken toward the smaller stride.
func mostFrequentStride(a []uint64) int {
	counts := make(map[int]int)
	prev := 0
	for _, x := range a {
		cur := int(x)
		if d := cur - prev; prev != 0 && d >= 0 {
			counts[d]++
		}
		prev = cur
	}
	best, bestCount := 0, 0
	for stride, c := range counts {
		if c > bestCount || (c == bestCount && stride < best) {
			best, bestCount = stride, c
		}
	}
	return best
}
|  | ||||
| func countSparseEntries(a []uint64) int { | ||||
| 	stride := mostFrequentStride(a) | ||||
| 	var v, count int | ||||
| 	for _, tv := range a { | ||||
| 		if int(tv)-v != stride { | ||||
| 			if tv != 0 { | ||||
| 				count++ | ||||
| 			} | ||||
| 		} | ||||
| 		v = int(tv) | ||||
| 	} | ||||
| 	return count | ||||
| } | ||||
|  | ||||
| func (c *normCompacter) Size(v []uint64) (sz int, ok bool) { | ||||
| 	if n := countSparseEntries(v); n <= maxSparseEntries { | ||||
| 		return (n+1)*4 + 2, true | ||||
| 	} | ||||
| 	return 0, false | ||||
| } | ||||
|  | ||||
| func (c *normCompacter) Store(v []uint64) uint32 { | ||||
| 	h := uint32(len(c.sparseOffset)) | ||||
| 	c.sparseBlocks = append(c.sparseBlocks, v) | ||||
| 	c.sparseOffset = append(c.sparseOffset, uint16(c.sparseCount)) | ||||
| 	c.sparseCount += countSparseEntries(v) + 1 | ||||
| 	return h | ||||
| } | ||||
|  | ||||
| func (c *normCompacter) Handler() string { | ||||
| 	return c.name + "Sparse.lookup" | ||||
| } | ||||
|  | ||||
// Print writes the Go source for the collected sparse offset and value
// tables to w, returning the first write error, if any, via retErr.
func (c *normCompacter) Print(w io.Writer) (retErr error) {
	// p prints formatted output, remembering only the first write error.
	p := func(f string, x ...interface{}) {
		if _, err := fmt.Fprintf(w, f, x...); retErr == nil && err != nil {
			retErr = err
		}
	}

	ls := len(c.sparseBlocks)
	p("// %sSparseOffset: %d entries, %d bytes\n", c.name, ls, ls*2)
	p("var %sSparseOffset = %#v\n\n", c.name, c.sparseOffset)

	ns := c.sparseCount
	p("// %sSparseValues: %d entries, %d bytes\n", c.name, ns, ns*4)
	p("var %sSparseValues = [%d]valueRange {", c.name, ns)
	for i, b := range c.sparseBlocks {
		p("\n// Block %#x, offset %#x", i, c.sparseOffset[i])
		var v int
		stride := mostFrequentStride(b)
		n := countSparseEntries(b)
		// Header entry: value holds the stride, lo holds the range count.
		p("\n{value:%#04x,lo:%#02x},", stride, uint8(n))
		for i, nv := range b {
			if int(nv)-v != stride {
				// A break in the stride closes the previous range (if any)
				// and opens a new one (if the value is non-zero).
				if v != 0 {
					p(",hi:%#02x},", 0x80+i-1)
				}
				if nv != 0 {
					p("\n{value:%#04x,lo:%#02x", nv, 0x80+i)
				}
			}
			v = int(nv)
		}
		if v != 0 {
			p(",hi:%#02x},", 0x80+len(b)-1)
		}
	}
	p("\n}\n\n")
	return
}
							
								
								
									
										113
									
								
								vendor/golang.org/x/text/unicode/rangetable/gen.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										113
									
								
								vendor/golang.org/x/text/unicode/rangetable/gen.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,113 @@ | ||||
| // Copyright 2015 The Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| // +build ignore | ||||
|  | ||||
| package main | ||||
|  | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"flag" | ||||
| 	"fmt" | ||||
| 	"io" | ||||
| 	"log" | ||||
| 	"reflect" | ||||
| 	"sort" | ||||
| 	"strings" | ||||
| 	"unicode" | ||||
|  | ||||
| 	"golang.org/x/text/internal/gen" | ||||
| 	"golang.org/x/text/internal/ucd" | ||||
| 	"golang.org/x/text/unicode/rangetable" | ||||
| ) | ||||
|  | ||||
// versionList is the comma-separated list of Unicode versions, supplied via
// --versions, for which RangeTables are generated.
var versionList = flag.String("versions", "",
	"list of versions for which to generate RangeTables")

// bootstrapMessage is printed when no versions were specified.
const bootstrapMessage = `No versions specified.
To bootstrap the code generation, run:
	go run gen.go --versions=4.1.0,5.0.0,6.0.0,6.1.0,6.2.0,6.3.0,7.0.0

and ensure that the latest versions are included by checking:
	http://www.unicode.org/Public/`
|  | ||||
| func getVersions() []string { | ||||
| 	if *versionList == "" { | ||||
| 		log.Fatal(bootstrapMessage) | ||||
| 	} | ||||
|  | ||||
| 	versions := strings.Split(*versionList, ",") | ||||
| 	sort.Strings(versions) | ||||
|  | ||||
| 	// Ensure that at least the current version is included. | ||||
| 	for _, v := range versions { | ||||
| 		if v == gen.UnicodeVersion() { | ||||
| 			return versions | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	versions = append(versions, gen.UnicodeVersion()) | ||||
| 	sort.Strings(versions) | ||||
| 	return versions | ||||
| } | ||||
|  | ||||
| func main() { | ||||
| 	gen.Init() | ||||
|  | ||||
| 	versions := getVersions() | ||||
|  | ||||
| 	w := &bytes.Buffer{} | ||||
|  | ||||
| 	fmt.Fprintf(w, "//go:generate go run gen.go --versions=%s\n\n", strings.Join(versions, ",")) | ||||
| 	fmt.Fprintf(w, "import \"unicode\"\n\n") | ||||
|  | ||||
| 	vstr := func(s string) string { return strings.Replace(s, ".", "_", -1) } | ||||
|  | ||||
| 	fmt.Fprintf(w, "var assigned = map[string]*unicode.RangeTable{\n") | ||||
| 	for _, v := range versions { | ||||
| 		fmt.Fprintf(w, "\t%q: assigned%s,\n", v, vstr(v)) | ||||
| 	} | ||||
| 	fmt.Fprintf(w, "}\n\n") | ||||
|  | ||||
| 	var size int | ||||
| 	for _, v := range versions { | ||||
| 		assigned := []rune{} | ||||
|  | ||||
| 		r := gen.Open("http://www.unicode.org/Public/", "", v+"/ucd/UnicodeData.txt") | ||||
| 		ucd.Parse(r, func(p *ucd.Parser) { | ||||
| 			assigned = append(assigned, p.Rune(0)) | ||||
| 		}) | ||||
|  | ||||
| 		rt := rangetable.New(assigned...) | ||||
| 		sz := int(reflect.TypeOf(unicode.RangeTable{}).Size()) | ||||
| 		sz += int(reflect.TypeOf(unicode.Range16{}).Size()) * len(rt.R16) | ||||
| 		sz += int(reflect.TypeOf(unicode.Range32{}).Size()) * len(rt.R32) | ||||
|  | ||||
| 		fmt.Fprintf(w, "// size %d bytes (%d KiB)\n", sz, sz/1024) | ||||
| 		fmt.Fprintf(w, "var assigned%s = ", vstr(v)) | ||||
| 		print(w, rt) | ||||
|  | ||||
| 		size += sz | ||||
| 	} | ||||
|  | ||||
| 	fmt.Fprintf(w, "// Total size %d bytes (%d KiB)\n", size, size/1024) | ||||
|  | ||||
| 	gen.WriteGoFile("tables.go", "rangetable", w.Bytes()) | ||||
| } | ||||
|  | ||||
| func print(w io.Writer, rt *unicode.RangeTable) { | ||||
| 	fmt.Fprintln(w, "&unicode.RangeTable{") | ||||
| 	fmt.Fprintln(w, "\tR16: []unicode.Range16{") | ||||
| 	for _, r := range rt.R16 { | ||||
| 		fmt.Fprintf(w, "\t\t{%#04x, %#04x, %d},\n", r.Lo, r.Hi, r.Stride) | ||||
| 	} | ||||
| 	fmt.Fprintln(w, "\t},") | ||||
| 	fmt.Fprintln(w, "\tR32: []unicode.Range32{") | ||||
| 	for _, r := range rt.R32 { | ||||
| 		fmt.Fprintf(w, "\t\t{%#08x, %#08x, %d},\n", r.Lo, r.Hi, r.Stride) | ||||
| 	} | ||||
| 	fmt.Fprintln(w, "\t},") | ||||
| 	fmt.Fprintf(w, "\tLatinOffset: %d,\n", rt.LatinOffset) | ||||
| 	fmt.Fprintf(w, "}\n\n") | ||||
| } | ||||
							
								
								
									
										260
									
								
								vendor/golang.org/x/text/unicode/rangetable/merge.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										260
									
								
								vendor/golang.org/x/text/unicode/rangetable/merge.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,260 @@ | ||||
| // Copyright 2015 The Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| package rangetable | ||||
|  | ||||
| import ( | ||||
| 	"unicode" | ||||
| ) | ||||
|  | ||||
// atEnd is used to mark a completed iteration.
const atEnd = unicode.MaxRune + 1

// Merge returns a new RangeTable that is the union of the given tables.
// It can also be used to compact user-created RangeTables. The entries in
// R16 and R32 for any given RangeTable should be sorted and non-overlapping.
//
// A lookup in the resulting table can be several times faster than using In
// directly on the ranges. Merge is an expensive operation, however, and only
// makes sense if one intends to use the result for more than a couple of
// hundred lookups.
func Merge(ranges ...*unicode.RangeTable) *unicode.RangeTable {
	rt := &unicode.RangeTable{}
	if len(ranges) == 0 {
		return rt
	}

	iter := tablesIter(make([]tableIndex, len(ranges)))

	// Seed one cursor per input table with its first R16 entry (or atEnd
	// for tables that have none).
	for i, t := range ranges {
		iter[i] = tableIndex{t, 0, atEnd}
		if len(t.R16) > 0 {
			iter[i].next = rune(t.R16[0].Lo)
		}
	}

	// Drain the 16-bit ranges: repeatedly pull the next non-overlapping
	// range and coalesce it with the pending one when the strides allow.
	// A zero Stride from next16 signals exhaustion.
	if r0 := iter.next16(); r0.Stride != 0 {
		for {
			r1 := iter.next16()
			if r1.Stride == 0 {
				// No more ranges; flush the pending one.
				rt.R16 = append(rt.R16, r0)
				break
			}
			stride := r1.Lo - r0.Hi
			if (r1.Lo == r1.Hi || stride == r1.Stride) && (r0.Lo == r0.Hi || stride == r0.Stride) {
				// Fully merge the next range into the previous one.
				r0.Hi, r0.Stride = r1.Hi, stride
				continue
			} else if stride == r0.Stride {
				// Move the first element of r1 to r0. This may eliminate an
				// entry.
				r0.Hi = r1.Lo
				r0.Stride = stride
				r1.Lo = r1.Lo + r1.Stride
				if r1.Lo > r1.Hi {
					continue
				}
			}
			rt.R16 = append(rt.R16, r0)
			r0 = r1
		}
	}

	// Reset the cursors for the 32-bit pass.
	for i, t := range ranges {
		iter[i] = tableIndex{t, 0, atEnd}
		if len(t.R32) > 0 {
			iter[i].next = rune(t.R32[0].Lo)
		}
	}

	// Same coalescing pass as above, over the R32 entries.
	if r0 := iter.next32(); r0.Stride != 0 {
		for {
			r1 := iter.next32()
			if r1.Stride == 0 {
				rt.R32 = append(rt.R32, r0)
				break
			}
			stride := r1.Lo - r0.Hi
			if (r1.Lo == r1.Hi || stride == r1.Stride) && (r0.Lo == r0.Hi || stride == r0.Stride) {
				// Fully merge the next range into the previous one.
				r0.Hi, r0.Stride = r1.Hi, stride
				continue
			} else if stride == r0.Stride {
				// Move the first element of r1 to r0. This may eliminate an
				// entry. (Assigning r0.Stride is unnecessary here: the branch
				// condition already guarantees stride == r0.Stride.)
				r0.Hi = r1.Lo
				r1.Lo = r1.Lo + r1.Stride
				if r1.Lo > r1.Hi {
					continue
				}
			}
			rt.R32 = append(rt.R32, r0)
			r0 = r1
		}
	}

	// LatinOffset counts the leading R16 entries that lie entirely within
	// Latin-1, matching the unicode.RangeTable convention.
	for i := 0; i < len(rt.R16) && rt.R16[i].Hi <= unicode.MaxLatin1; i++ {
		rt.LatinOffset = i + 1
	}

	return rt
}
|  | ||||
// tableIndex is a cursor into one RangeTable being merged: t is the
// table, p indexes the current entry of R16 (or R32, depending on which
// pass is running), and next is the next code point this table will
// yield, or atEnd once the table is exhausted.
type tableIndex struct {
	t    *unicode.RangeTable
	p    uint32
	next rune
}

// tablesIter holds one cursor per input table; it is kept ordered by the
// next field via sortIter.
type tablesIter []tableIndex
|  | ||||
| // sortIter does an insertion sort using the next field of tableIndex. Insertion | ||||
| // sort is a good sorting algorithm for this case. | ||||
| func sortIter(t []tableIndex) { | ||||
| 	for i := range t { | ||||
| 		for j := i; j > 0 && t[j-1].next > t[j].next; j-- { | ||||
| 			t[j], t[j-1] = t[j-1], t[j] | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
|  | ||||
// next16 finds the next range to be added to the table. If ranges overlap
// between multiple tables it clips the result to a non-overlapping range
// if the elements are not fully subsumed. It returns a zero range (with
// Stride 0) if there are no more ranges.
func (ti tablesIter) next16() unicode.Range16 {
	sortIter(ti)

	// Start from the table with the smallest next code point.
	t0 := ti[0]
	if t0.next == atEnd {
		return unicode.Range16{}
	}
	r0 := t0.t.R16[t0.p]
	r0.Lo = uint16(t0.next)

	// We restrict the Hi of the current range if it overlaps with another range.
	for i := range ti {
		tn := ti[i]
		// Since our tableIndices are sorted by next, we can break if there
		// is no overlap. The first value of a next range can always be merged
		// into the current one, so we can break in case of equality as well.
		if rune(r0.Hi) <= tn.next {
			break
		}
		rn := tn.t.R16[tn.p]
		rn.Lo = uint16(tn.next)

		// Limit r0.Hi based on next ranges in list, but allow it to overlap
		// with ranges as long as it subsumes it.
		m := (rn.Lo - r0.Lo) % r0.Stride
		if m == 0 && (rn.Stride == r0.Stride || rn.Lo == rn.Hi) {
			// Overlap, take the min of the two Hi values: for simplicity's sake
			// we only process one range at a time.
			if r0.Hi > rn.Hi {
				r0.Hi = rn.Hi
			}
		} else {
			// Not a compatible stride. Set to the last possible value before
			// rn.Lo, but ensure there is at least one value.
			if x := rn.Lo - m; r0.Lo <= x {
				r0.Hi = x
			}
			break
		}
	}

	// Update the next values for each table.
	for i := range ti {
		tn := &ti[i]
		if rune(r0.Hi) < tn.next {
			break
		}
		rn := tn.t.R16[tn.p]
		stride := rune(rn.Stride)
		// Advance next past r0.Hi while staying on this table's stride.
		tn.next += stride * (1 + ((rune(r0.Hi) - tn.next) / stride))
		if rune(rn.Hi) < tn.next {
			// Current entry exhausted: move to the table's next R16 entry,
			// or mark the table done.
			if tn.p++; int(tn.p) == len(tn.t.R16) {
				tn.next = atEnd
			} else {
				tn.next = rune(tn.t.R16[tn.p].Lo)
			}
		}
	}

	// A single-element range is always reported with Stride 1.
	if r0.Lo == r0.Hi {
		r0.Stride = 1
	}

	return r0
}
|  | ||||
// next32 finds the next range to be added to the table. If ranges overlap
// between multiple tables it clips the result to a non-overlapping range
// if the elements are not fully subsumed. It returns a zero range (with
// Stride 0) if there are no more ranges. It mirrors next16 for the R32
// entries.
func (ti tablesIter) next32() unicode.Range32 {
	sortIter(ti)

	// Start from the table with the smallest next code point.
	t0 := ti[0]
	if t0.next == atEnd {
		return unicode.Range32{}
	}
	r0 := t0.t.R32[t0.p]
	r0.Lo = uint32(t0.next)

	// We restrict the Hi of the current range if it overlaps with another range.
	for i := range ti {
		tn := ti[i]
		// Since our tableIndices are sorted by next, we can break if there
		// is no overlap. The first value of a next range can always be merged
		// into the current one, so we can break in case of equality as well.
		if rune(r0.Hi) <= tn.next {
			break
		}
		rn := tn.t.R32[tn.p]
		rn.Lo = uint32(tn.next)

		// Limit r0.Hi based on next ranges in list, but allow it to overlap
		// with ranges as long as it subsumes it.
		m := (rn.Lo - r0.Lo) % r0.Stride
		if m == 0 && (rn.Stride == r0.Stride || rn.Lo == rn.Hi) {
			// Overlap, take the min of the two Hi values: for simplicity's sake
			// we only process one range at a time.
			if r0.Hi > rn.Hi {
				r0.Hi = rn.Hi
			}
		} else {
			// Not a compatible stride. Set to the last possible value before
			// rn.Lo, but ensure there is at least one value.
			if x := rn.Lo - m; r0.Lo <= x {
				r0.Hi = x
			}
			break
		}
	}

	// Update the next values for each table.
	for i := range ti {
		tn := &ti[i]
		if rune(r0.Hi) < tn.next {
			break
		}
		rn := tn.t.R32[tn.p]
		stride := rune(rn.Stride)
		// Advance next past r0.Hi while staying on this table's stride.
		tn.next += stride * (1 + ((rune(r0.Hi) - tn.next) / stride))
		if rune(rn.Hi) < tn.next {
			// Current entry exhausted: move to the table's next R32 entry,
			// or mark the table done.
			if tn.p++; int(tn.p) == len(tn.t.R32) {
				tn.next = atEnd
			} else {
				tn.next = rune(tn.t.R32[tn.p].Lo)
			}
		}
	}

	// A single-element range is always reported with Stride 1.
	if r0.Lo == r0.Hi {
		r0.Stride = 1
	}

	return r0
}
							
								
								
									
										70
									
								
								vendor/golang.org/x/text/unicode/rangetable/rangetable.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										70
									
								
								vendor/golang.org/x/text/unicode/rangetable/rangetable.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,70 @@ | ||||
| // Copyright 2015 The Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| // Package rangetable provides utilities for creating and inspecting | ||||
| // unicode.RangeTables. | ||||
| package rangetable | ||||
|  | ||||
| import ( | ||||
| 	"sort" | ||||
| 	"unicode" | ||||
| ) | ||||
|  | ||||
| // New creates a RangeTable from the given runes, which may contain duplicates. | ||||
| func New(r ...rune) *unicode.RangeTable { | ||||
| 	if len(r) == 0 { | ||||
| 		return &unicode.RangeTable{} | ||||
| 	} | ||||
|  | ||||
| 	sort.Sort(byRune(r)) | ||||
|  | ||||
| 	// Remove duplicates. | ||||
| 	k := 1 | ||||
| 	for i := 1; i < len(r); i++ { | ||||
| 		if r[k-1] != r[i] { | ||||
| 			r[k] = r[i] | ||||
| 			k++ | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	var rt unicode.RangeTable | ||||
| 	for _, r := range r[:k] { | ||||
| 		if r <= 0xFFFF { | ||||
| 			rt.R16 = append(rt.R16, unicode.Range16{Lo: uint16(r), Hi: uint16(r), Stride: 1}) | ||||
| 		} else { | ||||
| 			rt.R32 = append(rt.R32, unicode.Range32{Lo: uint32(r), Hi: uint32(r), Stride: 1}) | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	// Optimize RangeTable. | ||||
| 	return Merge(&rt) | ||||
| } | ||||
|  | ||||
// byRune orders a slice of runes by ascending code point; it implements
// sort.Interface for use with sort.Sort.
type byRune []rune

func (b byRune) Len() int           { return len(b) }
func (b byRune) Swap(i, j int)      { b[i], b[j] = b[j], b[i] }
func (b byRune) Less(i, j int) bool { return b[i] < b[j] }
|  | ||||
| // Visit visits all runes in the given RangeTable in order, calling fn for each. | ||||
| func Visit(rt *unicode.RangeTable, fn func(rune)) { | ||||
| 	for _, r16 := range rt.R16 { | ||||
| 		for r := rune(r16.Lo); r <= rune(r16.Hi); r += rune(r16.Stride) { | ||||
| 			fn(r) | ||||
| 		} | ||||
| 	} | ||||
| 	for _, r32 := range rt.R32 { | ||||
| 		for r := rune(r32.Lo); r <= rune(r32.Hi); r += rune(r32.Stride) { | ||||
| 			fn(r) | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // Assigned returns a RangeTable with all assigned code points for a given | ||||
| // Unicode version. This includes graphic, format, control, and private-use | ||||
| // characters. It returns nil if the data for the given version is not | ||||
| // available. | ||||
| func Assigned(version string) *unicode.RangeTable { | ||||
| 	return assigned[version] | ||||
| } | ||||
							
								
								
									
										5735
									
								
								vendor/golang.org/x/text/unicode/rangetable/tables.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										5735
									
								
								vendor/golang.org/x/text/unicode/rangetable/tables.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
										
											
												File diff suppressed because it is too large
												Load Diff
											
										
									
								
							
							
								
								
									
										202
									
								
								vendor/google.golang.org/genproto/LICENSE
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										202
									
								
								vendor/google.golang.org/genproto/LICENSE
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,202 @@ | ||||
|  | ||||
|                                  Apache License | ||||
|                            Version 2.0, January 2004 | ||||
|                         http://www.apache.org/licenses/ | ||||
|  | ||||
|    TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION | ||||
|  | ||||
|    1. Definitions. | ||||
|  | ||||
|       "License" shall mean the terms and conditions for use, reproduction, | ||||
|       and distribution as defined by Sections 1 through 9 of this document. | ||||
|  | ||||
|       "Licensor" shall mean the copyright owner or entity authorized by | ||||
|       the copyright owner that is granting the License. | ||||
|  | ||||
|       "Legal Entity" shall mean the union of the acting entity and all | ||||
|       other entities that control, are controlled by, or are under common | ||||
|       control with that entity. For the purposes of this definition, | ||||
|       "control" means (i) the power, direct or indirect, to cause the | ||||
|       direction or management of such entity, whether by contract or | ||||
|       otherwise, or (ii) ownership of fifty percent (50%) or more of the | ||||
|       outstanding shares, or (iii) beneficial ownership of such entity. | ||||
|  | ||||
|       "You" (or "Your") shall mean an individual or Legal Entity | ||||
|       exercising permissions granted by this License. | ||||
|  | ||||
|       "Source" form shall mean the preferred form for making modifications, | ||||
|       including but not limited to software source code, documentation | ||||
|       source, and configuration files. | ||||
|  | ||||
|       "Object" form shall mean any form resulting from mechanical | ||||
|       transformation or translation of a Source form, including but | ||||
|       not limited to compiled object code, generated documentation, | ||||
|       and conversions to other media types. | ||||
|  | ||||
|       "Work" shall mean the work of authorship, whether in Source or | ||||
|       Object form, made available under the License, as indicated by a | ||||
|       copyright notice that is included in or attached to the work | ||||
|       (an example is provided in the Appendix below). | ||||
|  | ||||
|       "Derivative Works" shall mean any work, whether in Source or Object | ||||
|       form, that is based on (or derived from) the Work and for which the | ||||
|       editorial revisions, annotations, elaborations, or other modifications | ||||
|       represent, as a whole, an original work of authorship. For the purposes | ||||
|       of this License, Derivative Works shall not include works that remain | ||||
|       separable from, or merely link (or bind by name) to the interfaces of, | ||||
|       the Work and Derivative Works thereof. | ||||
|  | ||||
|       "Contribution" shall mean any work of authorship, including | ||||
|       the original version of the Work and any modifications or additions | ||||
|       to that Work or Derivative Works thereof, that is intentionally | ||||
|       submitted to Licensor for inclusion in the Work by the copyright owner | ||||
|       or by an individual or Legal Entity authorized to submit on behalf of | ||||
|       the copyright owner. For the purposes of this definition, "submitted" | ||||
|       means any form of electronic, verbal, or written communication sent | ||||
|       to the Licensor or its representatives, including but not limited to | ||||
|       communication on electronic mailing lists, source code control systems, | ||||
|       and issue tracking systems that are managed by, or on behalf of, the | ||||
|       Licensor for the purpose of discussing and improving the Work, but | ||||
|       excluding communication that is conspicuously marked or otherwise | ||||
|       designated in writing by the copyright owner as "Not a Contribution." | ||||
|  | ||||
|       "Contributor" shall mean Licensor and any individual or Legal Entity | ||||
|       on behalf of whom a Contribution has been received by Licensor and | ||||
|       subsequently incorporated within the Work. | ||||
|  | ||||
|    2. Grant of Copyright License. Subject to the terms and conditions of | ||||
|       this License, each Contributor hereby grants to You a perpetual, | ||||
|       worldwide, non-exclusive, no-charge, royalty-free, irrevocable | ||||
|       copyright license to reproduce, prepare Derivative Works of, | ||||
|       publicly display, publicly perform, sublicense, and distribute the | ||||
|       Work and such Derivative Works in Source or Object form. | ||||
|  | ||||
|    3. Grant of Patent License. Subject to the terms and conditions of | ||||
|       this License, each Contributor hereby grants to You a perpetual, | ||||
|       worldwide, non-exclusive, no-charge, royalty-free, irrevocable | ||||
|       (except as stated in this section) patent license to make, have made, | ||||
|       use, offer to sell, sell, import, and otherwise transfer the Work, | ||||
|       where such license applies only to those patent claims licensable | ||||
|       by such Contributor that are necessarily infringed by their | ||||
|       Contribution(s) alone or by combination of their Contribution(s) | ||||
|       with the Work to which such Contribution(s) was submitted. If You | ||||
|       institute patent litigation against any entity (including a | ||||
|       cross-claim or counterclaim in a lawsuit) alleging that the Work | ||||
|       or a Contribution incorporated within the Work constitutes direct | ||||
|       or contributory patent infringement, then any patent licenses | ||||
|       granted to You under this License for that Work shall terminate | ||||
|       as of the date such litigation is filed. | ||||
|  | ||||
|    4. Redistribution. You may reproduce and distribute copies of the | ||||
|       Work or Derivative Works thereof in any medium, with or without | ||||
|       modifications, and in Source or Object form, provided that You | ||||
|       meet the following conditions: | ||||
|  | ||||
|       (a) You must give any other recipients of the Work or | ||||
|           Derivative Works a copy of this License; and | ||||
|  | ||||
|       (b) You must cause any modified files to carry prominent notices | ||||
|           stating that You changed the files; and | ||||
|  | ||||
|       (c) You must retain, in the Source form of any Derivative Works | ||||
|           that You distribute, all copyright, patent, trademark, and | ||||
|           attribution notices from the Source form of the Work, | ||||
|           excluding those notices that do not pertain to any part of | ||||
|           the Derivative Works; and | ||||
|  | ||||
|       (d) If the Work includes a "NOTICE" text file as part of its | ||||
|           distribution, then any Derivative Works that You distribute must | ||||
|           include a readable copy of the attribution notices contained | ||||
|           within such NOTICE file, excluding those notices that do not | ||||
|           pertain to any part of the Derivative Works, in at least one | ||||
|           of the following places: within a NOTICE text file distributed | ||||
|           as part of the Derivative Works; within the Source form or | ||||
|           documentation, if provided along with the Derivative Works; or, | ||||
|           within a display generated by the Derivative Works, if and | ||||
|           wherever such third-party notices normally appear. The contents | ||||
|           of the NOTICE file are for informational purposes only and | ||||
|           do not modify the License. You may add Your own attribution | ||||
|           notices within Derivative Works that You distribute, alongside | ||||
|           or as an addendum to the NOTICE text from the Work, provided | ||||
|           that such additional attribution notices cannot be construed | ||||
|           as modifying the License. | ||||
|  | ||||
|       You may add Your own copyright statement to Your modifications and | ||||
|       may provide additional or different license terms and conditions | ||||
|       for use, reproduction, or distribution of Your modifications, or | ||||
|       for any such Derivative Works as a whole, provided Your use, | ||||
|       reproduction, and distribution of the Work otherwise complies with | ||||
|       the conditions stated in this License. | ||||
|  | ||||
|    5. Submission of Contributions. Unless You explicitly state otherwise, | ||||
|       any Contribution intentionally submitted for inclusion in the Work | ||||
|       by You to the Licensor shall be under the terms and conditions of | ||||
|       this License, without any additional terms or conditions. | ||||
|       Notwithstanding the above, nothing herein shall supersede or modify | ||||
|       the terms of any separate license agreement you may have executed | ||||
|       with Licensor regarding such Contributions. | ||||
|  | ||||
|    6. Trademarks. This License does not grant permission to use the trade | ||||
|       names, trademarks, service marks, or product names of the Licensor, | ||||
|       except as required for reasonable and customary use in describing the | ||||
|       origin of the Work and reproducing the content of the NOTICE file. | ||||
|  | ||||
|    7. Disclaimer of Warranty. Unless required by applicable law or | ||||
|       agreed to in writing, Licensor provides the Work (and each | ||||
|       Contributor provides its Contributions) on an "AS IS" BASIS, | ||||
|       WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or | ||||
|       implied, including, without limitation, any warranties or conditions | ||||
|       of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A | ||||
|       PARTICULAR PURPOSE. You are solely responsible for determining the | ||||
|       appropriateness of using or redistributing the Work and assume any | ||||
|       risks associated with Your exercise of permissions under this License. | ||||
|  | ||||
|    8. Limitation of Liability. In no event and under no legal theory, | ||||
|       whether in tort (including negligence), contract, or otherwise, | ||||
|       unless required by applicable law (such as deliberate and grossly | ||||
|       negligent acts) or agreed to in writing, shall any Contributor be | ||||
|       liable to You for damages, including any direct, indirect, special, | ||||
|       incidental, or consequential damages of any character arising as a | ||||
|       result of this License or out of the use or inability to use the | ||||
|       Work (including but not limited to damages for loss of goodwill, | ||||
|       work stoppage, computer failure or malfunction, or any and all | ||||
|       other commercial damages or losses), even if such Contributor | ||||
|       has been advised of the possibility of such damages. | ||||
|  | ||||
|    9. Accepting Warranty or Additional Liability. While redistributing | ||||
|       the Work or Derivative Works thereof, You may choose to offer, | ||||
|       and charge a fee for, acceptance of support, warranty, indemnity, | ||||
|       or other liability obligations and/or rights consistent with this | ||||
|       License. However, in accepting such obligations, You may act only | ||||
|       on Your own behalf and on Your sole responsibility, not on behalf | ||||
|       of any other Contributor, and only if You agree to indemnify, | ||||
|       defend, and hold each Contributor harmless for any liability | ||||
|       incurred by, or claims asserted against, such Contributor by reason | ||||
|       of your accepting any such warranty or additional liability. | ||||
|  | ||||
|    END OF TERMS AND CONDITIONS | ||||
|  | ||||
|    APPENDIX: How to apply the Apache License to your work. | ||||
|  | ||||
|       To apply the Apache License to your work, attach the following | ||||
|       boilerplate notice, with the fields enclosed by brackets "[]" | ||||
|       replaced with your own identifying information. (Don't include | ||||
|       the brackets!)  The text should be enclosed in the appropriate | ||||
|       comment syntax for the file format. We also recommend that a | ||||
|       file or class name and description of purpose be included on the | ||||
|       same "printed page" as the copyright notice for easier | ||||
|       identification within third-party archives. | ||||
|  | ||||
|    Copyright [yyyy] [name of copyright owner] | ||||
|  | ||||
|    Licensed under the Apache License, Version 2.0 (the "License"); | ||||
|    you may not use this file except in compliance with the License. | ||||
|    You may obtain a copy of the License at | ||||
|  | ||||
|        http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  | ||||
|    Unless required by applicable law or agreed to in writing, software | ||||
|    distributed under the License is distributed on an "AS IS" BASIS, | ||||
|    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
|    See the License for the specific language governing permissions and | ||||
|    limitations under the License. | ||||
							
								
								
									
										28
									
								
								vendor/google.golang.org/genproto/README.md
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										28
									
								
								vendor/google.golang.org/genproto/README.md
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,28 @@ | ||||
| Go generated proto packages | ||||
| =========================== | ||||
|  | ||||
| [](https://travis-ci.org/google/go-genproto) | ||||
| [](https://godoc.org/google.golang.org/genproto) | ||||
|  | ||||
| > **IMPORTANT** This repository is currently experimental. The structure | ||||
| > of the contained packages is subject to change. Please see the original | ||||
> source repositories (listed below) to find out the status of each
| > protocol buffer's associated service. | ||||
|  | ||||
| This repository contains the generated Go packages for common protocol buffer | ||||
| types, and the generated [gRPC][1] code necessary for interacting with Google's gRPC | ||||
| APIs. | ||||
|  | ||||
| There are two sources for the proto files used in this repository: | ||||
|  | ||||
| 1. [google/protobuf][2]: the code in the `protobuf` and `ptypes` subdirectories | ||||
|    is derived from this repo. The messages in `protobuf` are used to describe | ||||
|    protocol buffer messages themselves. The messages under `ptypes` define the | ||||
|    common well-known types. | ||||
2. [googleapis/googleapis][3]: the code in the `googleapis` directory is derived from this
|    repo. The packages here contain types specifically for interacting with Google | ||||
|    APIs. | ||||
|  | ||||
| [1]: http://grpc.io | ||||
| [2]: https://github.com/google/protobuf/ | ||||
| [3]: https://github.com/googleapis/googleapis/ | ||||
							
								
								
									
										144
									
								
								vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										144
									
								
								vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,144 @@ | ||||
| // Code generated by protoc-gen-go. | ||||
| // source: google/rpc/status.proto | ||||
| // DO NOT EDIT! | ||||
|  | ||||
| /* | ||||
| Package status is a generated protocol buffer package. | ||||
|  | ||||
| It is generated from these files: | ||||
| 	google/rpc/status.proto | ||||
|  | ||||
| It has these top-level messages: | ||||
| 	Status | ||||
| */ | ||||
| package status | ||||
|  | ||||
| import proto "github.com/golang/protobuf/proto" | ||||
| import fmt "fmt" | ||||
| import math "math" | ||||
| import google_protobuf "github.com/golang/protobuf/ptypes/any" | ||||
|  | ||||
| // Reference imports to suppress errors if they are not otherwise used. | ||||
| var _ = proto.Marshal | ||||
| var _ = fmt.Errorf | ||||
| var _ = math.Inf | ||||
|  | ||||
| // This is a compile-time assertion to ensure that this generated file | ||||
| // is compatible with the proto package it is being compiled against. | ||||
| // A compilation error at this line likely means your copy of the | ||||
| // proto package needs to be updated. | ||||
| const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package | ||||
|  | ||||
| // The `Status` type defines a logical error model that is suitable for different | ||||
| // programming environments, including REST APIs and RPC APIs. It is used by | ||||
| // [gRPC](https://github.com/grpc). The error model is designed to be: | ||||
| // | ||||
| // - Simple to use and understand for most users | ||||
| // - Flexible enough to meet unexpected needs | ||||
| // | ||||
| // # Overview | ||||
| // | ||||
| // The `Status` message contains three pieces of data: error code, error message, | ||||
| // and error details. The error code should be an enum value of | ||||
| // [google.rpc.Code][google.rpc.Code], but it may accept additional error codes if needed.  The | ||||
| // error message should be a developer-facing English message that helps | ||||
| // developers *understand* and *resolve* the error. If a localized user-facing | ||||
| // error message is needed, put the localized message in the error details or | ||||
| // localize it in the client. The optional error details may contain arbitrary | ||||
| // information about the error. There is a predefined set of error detail types | ||||
| // in the package `google.rpc` which can be used for common error conditions. | ||||
| // | ||||
| // # Language mapping | ||||
| // | ||||
| // The `Status` message is the logical representation of the error model, but it | ||||
| // is not necessarily the actual wire format. When the `Status` message is | ||||
| // exposed in different client libraries and different wire protocols, it can be | ||||
| // mapped differently. For example, it will likely be mapped to some exceptions | ||||
| // in Java, but more likely mapped to some error codes in C. | ||||
| // | ||||
| // # Other uses | ||||
| // | ||||
| // The error model and the `Status` message can be used in a variety of | ||||
| // environments, either with or without APIs, to provide a | ||||
| // consistent developer experience across different environments. | ||||
| // | ||||
| // Example uses of this error model include: | ||||
| // | ||||
| // - Partial errors. If a service needs to return partial errors to the client, | ||||
| //     it may embed the `Status` in the normal response to indicate the partial | ||||
| //     errors. | ||||
| // | ||||
| // - Workflow errors. A typical workflow has multiple steps. Each step may | ||||
| //     have a `Status` message for error reporting purpose. | ||||
| // | ||||
| // - Batch operations. If a client uses batch request and batch response, the | ||||
| //     `Status` message should be used directly inside batch response, one for | ||||
| //     each error sub-response. | ||||
| // | ||||
| // - Asynchronous operations. If an API call embeds asynchronous operation | ||||
| //     results in its response, the status of those operations should be | ||||
| //     represented directly using the `Status` message. | ||||
| // | ||||
| // - Logging. If some API errors are stored in logs, the message `Status` could | ||||
| //     be used directly after any stripping needed for security/privacy reasons. | ||||
| type Status struct { | ||||
| 	// The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code]. | ||||
| 	Code int32 `protobuf:"varint,1,opt,name=code" json:"code,omitempty"` | ||||
| 	// A developer-facing error message, which should be in English. Any | ||||
| 	// user-facing error message should be localized and sent in the | ||||
| 	// [google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client. | ||||
| 	Message string `protobuf:"bytes,2,opt,name=message" json:"message,omitempty"` | ||||
| 	// A list of messages that carry the error details.  There will be a | ||||
| 	// common set of message types for APIs to use. | ||||
| 	Details []*google_protobuf.Any `protobuf:"bytes,3,rep,name=details" json:"details,omitempty"` | ||||
| } | ||||
|  | ||||
| func (m *Status) Reset()                    { *m = Status{} } | ||||
| func (m *Status) String() string            { return proto.CompactTextString(m) } | ||||
| func (*Status) ProtoMessage()               {} | ||||
| func (*Status) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } | ||||
|  | ||||
| func (m *Status) GetCode() int32 { | ||||
| 	if m != nil { | ||||
| 		return m.Code | ||||
| 	} | ||||
| 	return 0 | ||||
| } | ||||
|  | ||||
| func (m *Status) GetMessage() string { | ||||
| 	if m != nil { | ||||
| 		return m.Message | ||||
| 	} | ||||
| 	return "" | ||||
| } | ||||
|  | ||||
| func (m *Status) GetDetails() []*google_protobuf.Any { | ||||
| 	if m != nil { | ||||
| 		return m.Details | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func init() { | ||||
| 	proto.RegisterType((*Status)(nil), "google.rpc.Status") | ||||
| } | ||||
|  | ||||
| func init() { proto.RegisterFile("google/rpc/status.proto", fileDescriptor0) } | ||||
|  | ||||
| var fileDescriptor0 = []byte{ | ||||
| 	// 209 bytes of a gzipped FileDescriptorProto | ||||
| 	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4f, 0xcf, 0xcf, 0x4f, | ||||
| 	0xcf, 0x49, 0xd5, 0x2f, 0x2a, 0x48, 0xd6, 0x2f, 0x2e, 0x49, 0x2c, 0x29, 0x2d, 0xd6, 0x2b, 0x28, | ||||
| 	0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x82, 0x48, 0xe8, 0x15, 0x15, 0x24, 0x4b, 0x49, 0x42, 0x15, 0x81, | ||||
| 	0x65, 0x92, 0x4a, 0xd3, 0xf4, 0x13, 0xf3, 0x2a, 0x21, 0xca, 0x94, 0xd2, 0xb8, 0xd8, 0x82, 0xc1, | ||||
| 	0xda, 0x84, 0x84, 0xb8, 0x58, 0x92, 0xf3, 0x53, 0x52, 0x25, 0x18, 0x15, 0x18, 0x35, 0x58, 0x83, | ||||
| 	0xc0, 0x6c, 0x21, 0x09, 0x2e, 0xf6, 0xdc, 0xd4, 0xe2, 0xe2, 0xc4, 0xf4, 0x54, 0x09, 0x26, 0x05, | ||||
| 	0x46, 0x0d, 0xce, 0x20, 0x18, 0x57, 0x48, 0x8f, 0x8b, 0x3d, 0x25, 0xb5, 0x24, 0x31, 0x33, 0xa7, | ||||
| 	0x58, 0x82, 0x59, 0x81, 0x59, 0x83, 0xdb, 0x48, 0x44, 0x0f, 0x6a, 0x21, 0xcc, 0x12, 0x3d, 0xc7, | ||||
| 	0xbc, 0xca, 0x20, 0x98, 0x22, 0xa7, 0x38, 0x2e, 0xbe, 0xe4, 0xfc, 0x5c, 0x3d, 0x84, 0xa3, 0x9c, | ||||
| 	0xb8, 0x21, 0xf6, 0x06, 0x80, 0x94, 0x07, 0x30, 0x46, 0x99, 0x43, 0xa5, 0xd2, 0xf3, 0x73, 0x12, | ||||
| 	0xf3, 0xd2, 0xf5, 0xf2, 0x8b, 0xd2, 0xf5, 0xd3, 0x53, 0xf3, 0xc0, 0x86, 0xe9, 0x43, 0xa4, 0x12, | ||||
| 	0x0b, 0x32, 0x8b, 0x91, 0xfc, 0x69, 0x0d, 0xa1, 0x16, 0x31, 0x31, 0x07, 0x05, 0x38, 0x27, 0xb1, | ||||
| 	0x81, 0x55, 0x1a, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0xa4, 0x53, 0xf0, 0x7c, 0x10, 0x01, 0x00, | ||||
| 	0x00, | ||||
| } | ||||
							
								
								
									
										20
									
								
								vendor/google.golang.org/grpc/README.md
									
									
									
										generated
									
									
										vendored
									
									
								
							
							
						
						
									
										20
									
								
								vendor/google.golang.org/grpc/README.md
									
									
									
										generated
									
									
										vendored
									
									
								
							| @@ -1,4 +1,4 @@ | ||||
| #gRPC-Go | ||||
| # gRPC-Go | ||||
|  | ||||
| [](https://travis-ci.org/grpc/grpc-go) [](https://godoc.org/google.golang.org/grpc) | ||||
|  | ||||
| @@ -16,23 +16,7 @@ $ go get google.golang.org/grpc | ||||
| Prerequisites | ||||
| ------------- | ||||
|  | ||||
| This requires Go 1.5 or later. | ||||
|  | ||||
| A note on the version used: significant performance improvements in benchmarks | ||||
| of grpc-go have been seen by upgrading the go version from 1.5 to the latest | ||||
| 1.7.1. | ||||
|  | ||||
| From https://golang.org/doc/install, one way to install the latest version of go is: | ||||
| ``` | ||||
| $ GO_VERSION=1.7.1 | ||||
| $ OS=linux | ||||
| $ ARCH=amd64 | ||||
| $ curl -O https://storage.googleapis.com/golang/go${GO_VERSION}.${OS}-${ARCH}.tar.gz | ||||
| $ sudo tar -C /usr/local -xzf go$GO_VERSION.$OS-$ARCH.tar.gz | ||||
| $ # Put go on the PATH, keep the usual installation dir | ||||
| $ sudo ln -s /usr/local/go/bin/go /usr/bin/go | ||||
| $ rm go$GO_VERSION.$OS-$ARCH.tar.gz | ||||
| ``` | ||||
| This requires Go 1.6 or later. | ||||
|  | ||||
| Constraints | ||||
| ----------- | ||||
|   | ||||
							
								
								
									
										100
									
								
								vendor/google.golang.org/grpc/call.go
									
									
									
										generated
									
									
										vendored
									
									
								
							
							
						
						
									
										100
									
								
								vendor/google.golang.org/grpc/call.go
									
									
									
										generated
									
									
										vendored
									
									
								
							| @@ -36,13 +36,14 @@ package grpc | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"io" | ||||
| 	"math" | ||||
| 	"time" | ||||
|  | ||||
| 	"golang.org/x/net/context" | ||||
| 	"golang.org/x/net/trace" | ||||
| 	"google.golang.org/grpc/codes" | ||||
| 	"google.golang.org/grpc/peer" | ||||
| 	"google.golang.org/grpc/stats" | ||||
| 	"google.golang.org/grpc/status" | ||||
| 	"google.golang.org/grpc/transport" | ||||
| ) | ||||
|  | ||||
| @@ -66,34 +67,33 @@ func recvResponse(ctx context.Context, dopts dialOptions, t transport.ClientTran | ||||
| 	} | ||||
| 	p := &parser{r: stream} | ||||
| 	var inPayload *stats.InPayload | ||||
| 	if stats.On() { | ||||
| 	if dopts.copts.StatsHandler != nil { | ||||
| 		inPayload = &stats.InPayload{ | ||||
| 			Client: true, | ||||
| 		} | ||||
| 	} | ||||
| 	for { | ||||
| 		if err = recv(p, dopts.codec, stream, dopts.dc, reply, math.MaxInt32, inPayload); err != nil { | ||||
| 		if err = recv(p, dopts.codec, stream, dopts.dc, reply, dopts.maxMsgSize, inPayload); err != nil { | ||||
| 			if err == io.EOF { | ||||
| 				break | ||||
| 			} | ||||
| 			return | ||||
| 		} | ||||
| 	} | ||||
| 	if inPayload != nil && err == io.EOF && stream.StatusCode() == codes.OK { | ||||
| 	if inPayload != nil && err == io.EOF && stream.Status().Code() == codes.OK { | ||||
| 		// TODO in the current implementation, inTrailer may be handled before inPayload in some cases. | ||||
| 		// Fix the order if necessary. | ||||
| 		stats.HandleRPC(ctx, inPayload) | ||||
| 		dopts.copts.StatsHandler.HandleRPC(ctx, inPayload) | ||||
| 	} | ||||
| 	c.trailerMD = stream.Trailer() | ||||
| 	if peer, ok := peer.FromContext(stream.Context()); ok { | ||||
| 		c.peer = peer | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| // sendRequest writes out various information of an RPC such as Context and Message. | ||||
| func sendRequest(ctx context.Context, codec Codec, compressor Compressor, callHdr *transport.CallHdr, t transport.ClientTransport, args interface{}, opts *transport.Options) (_ *transport.Stream, err error) { | ||||
| 	stream, err := t.NewStream(ctx, callHdr) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| func sendRequest(ctx context.Context, dopts dialOptions, compressor Compressor, callHdr *transport.CallHdr, stream *transport.Stream, t transport.ClientTransport, args interface{}, opts *transport.Options) (err error) { | ||||
| 	defer func() { | ||||
| 		if err != nil { | ||||
| 			// If err is connection error, t will be closed, no need to close stream here. | ||||
| @@ -109,28 +109,28 @@ func sendRequest(ctx context.Context, codec Codec, compressor Compressor, callHd | ||||
| 	if compressor != nil { | ||||
| 		cbuf = new(bytes.Buffer) | ||||
| 	} | ||||
| 	if stats.On() { | ||||
| 	if dopts.copts.StatsHandler != nil { | ||||
| 		outPayload = &stats.OutPayload{ | ||||
| 			Client: true, | ||||
| 		} | ||||
| 	} | ||||
| 	outBuf, err := encode(codec, args, compressor, cbuf, outPayload) | ||||
| 	outBuf, err := encode(dopts.codec, args, compressor, cbuf, outPayload) | ||||
| 	if err != nil { | ||||
| 		return nil, Errorf(codes.Internal, "grpc: %v", err) | ||||
| 		return Errorf(codes.Internal, "grpc: %v", err) | ||||
| 	} | ||||
| 	err = t.Write(stream, outBuf, opts) | ||||
| 	if err == nil && outPayload != nil { | ||||
| 		outPayload.SentTime = time.Now() | ||||
| 		stats.HandleRPC(ctx, outPayload) | ||||
| 		dopts.copts.StatsHandler.HandleRPC(ctx, outPayload) | ||||
| 	} | ||||
| 	// t.NewStream(...) could lead to an early rejection of the RPC (e.g., the service/method | ||||
| 	// does not exist.) so that t.Write could get io.EOF from wait(...). Leave the following | ||||
| 	// recvResponse to get the final status. | ||||
| 	if err != nil && err != io.EOF { | ||||
| 		return nil, err | ||||
| 		return err | ||||
| 	} | ||||
| 	// Sent successfully. | ||||
| 	return stream, nil | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| // Invoke sends the RPC request on the wire and returns after response is received. | ||||
| @@ -145,6 +145,14 @@ func Invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli | ||||
|  | ||||
| func invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) (e error) { | ||||
| 	c := defaultCallInfo | ||||
| 	if mc, ok := cc.getMethodConfig(method); ok { | ||||
| 		c.failFast = !mc.WaitForReady | ||||
| 		if mc.Timeout > 0 { | ||||
| 			var cancel context.CancelFunc | ||||
| 			ctx, cancel = context.WithTimeout(ctx, mc.Timeout) | ||||
| 			defer cancel() | ||||
| 		} | ||||
| 	} | ||||
| 	for _, o := range opts { | ||||
| 		if err := o.before(&c); err != nil { | ||||
| 			return toRPCErr(err) | ||||
| @@ -171,23 +179,25 @@ func invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli | ||||
| 			} | ||||
| 		}() | ||||
| 	} | ||||
| 	if stats.On() { | ||||
| 		ctx = stats.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method}) | ||||
| 	ctx = newContextWithRPCInfo(ctx) | ||||
| 	sh := cc.dopts.copts.StatsHandler | ||||
| 	if sh != nil { | ||||
| 		ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method}) | ||||
| 		begin := &stats.Begin{ | ||||
| 			Client:    true, | ||||
| 			BeginTime: time.Now(), | ||||
| 			FailFast:  c.failFast, | ||||
| 		} | ||||
| 		stats.HandleRPC(ctx, begin) | ||||
| 		sh.HandleRPC(ctx, begin) | ||||
| 	} | ||||
| 	defer func() { | ||||
| 		if stats.On() { | ||||
| 		if sh != nil { | ||||
| 			end := &stats.End{ | ||||
| 				Client:  true, | ||||
| 				EndTime: time.Now(), | ||||
| 				Error:   e, | ||||
| 			} | ||||
| 			stats.HandleRPC(ctx, end) | ||||
| 			sh.HandleRPC(ctx, end) | ||||
| 		} | ||||
| 	}() | ||||
| 	topts := &transport.Options{ | ||||
| @@ -211,13 +221,14 @@ func invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli | ||||
| 		if cc.dopts.cp != nil { | ||||
| 			callHdr.SendCompress = cc.dopts.cp.Type() | ||||
| 		} | ||||
|  | ||||
| 		gopts := BalancerGetOptions{ | ||||
| 			BlockingWait: !c.failFast, | ||||
| 		} | ||||
| 		t, put, err = cc.getTransport(ctx, gopts) | ||||
| 		if err != nil { | ||||
| 			// TODO(zhaoq): Probably revisit the error handling. | ||||
| 			if _, ok := err.(*rpcError); ok { | ||||
| 			if _, ok := status.FromError(err); ok { | ||||
| 				return err | ||||
| 			} | ||||
| 			if err == errConnClosing || err == errConnUnavailable { | ||||
| @@ -232,19 +243,35 @@ func invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli | ||||
| 		if c.traceInfo.tr != nil { | ||||
| 			c.traceInfo.tr.LazyLog(&payload{sent: true, msg: args}, true) | ||||
| 		} | ||||
| 		stream, err = sendRequest(ctx, cc.dopts.codec, cc.dopts.cp, callHdr, t, args, topts) | ||||
| 		stream, err = t.NewStream(ctx, callHdr) | ||||
| 		if err != nil { | ||||
| 			if put != nil { | ||||
| 				if _, ok := err.(transport.ConnectionError); ok { | ||||
| 					// If error is connection error, transport was sending data on wire, | ||||
| 					// and we are not sure if anything has been sent on wire. | ||||
| 					// If error is not connection error, we are sure nothing has been sent. | ||||
| 					updateRPCInfoInContext(ctx, rpcInfo{bytesSent: true, bytesReceived: false}) | ||||
| 				} | ||||
| 				put() | ||||
| 			} | ||||
| 			if _, ok := err.(transport.ConnectionError); (ok || err == transport.ErrStreamDrain) && !c.failFast { | ||||
| 				continue | ||||
| 			} | ||||
| 			return toRPCErr(err) | ||||
| 		} | ||||
| 		err = sendRequest(ctx, cc.dopts, cc.dopts.cp, callHdr, stream, t, args, topts) | ||||
| 		if err != nil { | ||||
| 			if put != nil { | ||||
| 				updateRPCInfoInContext(ctx, rpcInfo{ | ||||
| 					bytesSent:     stream.BytesSent(), | ||||
| 					bytesReceived: stream.BytesReceived(), | ||||
| 				}) | ||||
| 				put() | ||||
| 				put = nil | ||||
| 			} | ||||
| 			// Retry a non-failfast RPC when | ||||
| 			// i) there is a connection error; or | ||||
| 			// ii) the server started to drain before this RPC was initiated. | ||||
| 			if _, ok := err.(transport.ConnectionError); ok || err == transport.ErrStreamDrain { | ||||
| 				if c.failFast { | ||||
| 					return toRPCErr(err) | ||||
| 				} | ||||
| 			if _, ok := err.(transport.ConnectionError); (ok || err == transport.ErrStreamDrain) && !c.failFast { | ||||
| 				continue | ||||
| 			} | ||||
| 			return toRPCErr(err) | ||||
| @@ -252,13 +279,13 @@ func invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli | ||||
| 		err = recvResponse(ctx, cc.dopts, t, &c, stream, reply) | ||||
| 		if err != nil { | ||||
| 			if put != nil { | ||||
| 				updateRPCInfoInContext(ctx, rpcInfo{ | ||||
| 					bytesSent:     stream.BytesSent(), | ||||
| 					bytesReceived: stream.BytesReceived(), | ||||
| 				}) | ||||
| 				put() | ||||
| 				put = nil | ||||
| 			} | ||||
| 			if _, ok := err.(transport.ConnectionError); ok || err == transport.ErrStreamDrain { | ||||
| 				if c.failFast { | ||||
| 					return toRPCErr(err) | ||||
| 				} | ||||
| 			if _, ok := err.(transport.ConnectionError); (ok || err == transport.ErrStreamDrain) && !c.failFast { | ||||
| 				continue | ||||
| 			} | ||||
| 			return toRPCErr(err) | ||||
| @@ -268,9 +295,12 @@ func invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli | ||||
| 		} | ||||
| 		t.CloseStream(stream, nil) | ||||
| 		if put != nil { | ||||
| 			updateRPCInfoInContext(ctx, rpcInfo{ | ||||
| 				bytesSent:     stream.BytesSent(), | ||||
| 				bytesReceived: stream.BytesReceived(), | ||||
| 			}) | ||||
| 			put() | ||||
| 			put = nil | ||||
| 		} | ||||
| 		return Errorf(stream.StatusCode(), "%s", stream.StatusDesc()) | ||||
| 		return stream.Status().Err() | ||||
| 	} | ||||
| } | ||||
|   | ||||
							
								
								
									
										247
									
								
								vendor/google.golang.org/grpc/clientconn.go
									
									
									
										generated
									
									
										vendored
									
									
								
							
							
						
						
									
										247
									
								
								vendor/google.golang.org/grpc/clientconn.go
									
									
									
										generated
									
									
										vendored
									
									
								
							| @@ -36,8 +36,8 @@ package grpc | ||||
| import ( | ||||
| 	"errors" | ||||
| 	"fmt" | ||||
| 	"math" | ||||
| 	"net" | ||||
| 	"strings" | ||||
| 	"sync" | ||||
| 	"time" | ||||
|  | ||||
| @@ -45,6 +45,8 @@ import ( | ||||
| 	"golang.org/x/net/trace" | ||||
| 	"google.golang.org/grpc/credentials" | ||||
| 	"google.golang.org/grpc/grpclog" | ||||
| 	"google.golang.org/grpc/keepalive" | ||||
| 	"google.golang.org/grpc/stats" | ||||
| 	"google.golang.org/grpc/transport" | ||||
| ) | ||||
|  | ||||
| @@ -54,6 +56,8 @@ var ( | ||||
| 	ErrClientConnClosing = errors.New("grpc: the client connection is closing") | ||||
| 	// ErrClientConnTimeout indicates that the ClientConn cannot establish the | ||||
| 	// underlying connections within the specified timeout. | ||||
| 	// DEPRECATED: Please use context.DeadlineExceeded instead. This error will be | ||||
| 	// removed in Q1 2017. | ||||
| 	ErrClientConnTimeout = errors.New("grpc: timed out when dialing") | ||||
|  | ||||
| 	// errNoTransportSecurity indicates that there is no transport security | ||||
| @@ -75,7 +79,6 @@ var ( | ||||
| 	errConnClosing = errors.New("grpc: the connection is closing") | ||||
| 	// errConnUnavailable indicates that the connection is unavailable. | ||||
| 	errConnUnavailable = errors.New("grpc: the connection is unavailable") | ||||
| 	errNoAddr          = errors.New("grpc: there is no address available to dial") | ||||
| 	// minimum time to give a connection to complete | ||||
| 	minConnectTimeout = 20 * time.Second | ||||
| ) | ||||
| @@ -83,22 +86,33 @@ var ( | ||||
| // dialOptions configure a Dial call. dialOptions are set by the DialOption | ||||
| // values passed to Dial. | ||||
| type dialOptions struct { | ||||
| 	unaryInt  UnaryClientInterceptor | ||||
| 	streamInt StreamClientInterceptor | ||||
| 	codec     Codec | ||||
| 	cp        Compressor | ||||
| 	dc        Decompressor | ||||
| 	bs        backoffStrategy | ||||
| 	balancer  Balancer | ||||
| 	block     bool | ||||
| 	insecure  bool | ||||
| 	timeout   time.Duration | ||||
| 	copts     transport.ConnectOptions | ||||
| 	unaryInt   UnaryClientInterceptor | ||||
| 	streamInt  StreamClientInterceptor | ||||
| 	codec      Codec | ||||
| 	cp         Compressor | ||||
| 	dc         Decompressor | ||||
| 	bs         backoffStrategy | ||||
| 	balancer   Balancer | ||||
| 	block      bool | ||||
| 	insecure   bool | ||||
| 	timeout    time.Duration | ||||
| 	scChan     <-chan ServiceConfig | ||||
| 	copts      transport.ConnectOptions | ||||
| 	maxMsgSize int | ||||
| } | ||||
|  | ||||
| const defaultClientMaxMsgSize = math.MaxInt32 | ||||
|  | ||||
| // DialOption configures how we set up the connection. | ||||
| type DialOption func(*dialOptions) | ||||
|  | ||||
| // WithMaxMsgSize returns a DialOption which sets the maximum message size the client can receive. | ||||
| func WithMaxMsgSize(s int) DialOption { | ||||
| 	return func(o *dialOptions) { | ||||
| 		o.maxMsgSize = s | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // WithCodec returns a DialOption which sets a codec for message marshaling and unmarshaling. | ||||
| func WithCodec(c Codec) DialOption { | ||||
| 	return func(o *dialOptions) { | ||||
| @@ -129,6 +143,13 @@ func WithBalancer(b Balancer) DialOption { | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // WithServiceConfig returns a DialOption which has a channel to read the service configuration. | ||||
| func WithServiceConfig(c <-chan ServiceConfig) DialOption { | ||||
| 	return func(o *dialOptions) { | ||||
| 		o.scChan = c | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // WithBackoffMaxDelay configures the dialer to use the provided maximum delay | ||||
| // when backing off after failed connection attempts. | ||||
| func WithBackoffMaxDelay(md time.Duration) DialOption { | ||||
| @@ -212,6 +233,14 @@ func WithDialer(f func(string, time.Duration) (net.Conn, error)) DialOption { | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // WithStatsHandler returns a DialOption that specifies the stats handler | ||||
| // for all the RPCs and underlying network connections in this ClientConn. | ||||
| func WithStatsHandler(h stats.Handler) DialOption { | ||||
| 	return func(o *dialOptions) { | ||||
| 		o.copts.StatsHandler = h | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // FailOnNonTempDialError returns a DialOption that specified if gRPC fails on non-temporary dial errors. | ||||
| // If f is true, and dialer returns a non-temporary error, gRPC will fail the connection to the network | ||||
| // address and won't try to reconnect. | ||||
| @@ -230,6 +259,13 @@ func WithUserAgent(s string) DialOption { | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // WithKeepaliveParams returns a DialOption that specifies keepalive parameters for the client transport. | ||||
| func WithKeepaliveParams(kp keepalive.ClientParameters) DialOption { | ||||
| 	return func(o *dialOptions) { | ||||
| 		o.copts.KeepaliveParams = kp | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // WithUnaryInterceptor returns a DialOption that specifies the interceptor for unary RPCs. | ||||
| func WithUnaryInterceptor(f UnaryClientInterceptor) DialOption { | ||||
| 	return func(o *dialOptions) { | ||||
| @@ -244,6 +280,15 @@ func WithStreamInterceptor(f StreamClientInterceptor) DialOption { | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // WithAuthority returns a DialOption that specifies the value to be used as | ||||
| // the :authority pseudo-header. This value only works with WithInsecure and | ||||
| // has no effect if TransportCredentials are present. | ||||
| func WithAuthority(a string) DialOption { | ||||
| 	return func(o *dialOptions) { | ||||
| 		o.copts.Authority = a | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // Dial creates a client connection to the given target. | ||||
| func Dial(target string, opts ...DialOption) (*ClientConn, error) { | ||||
| 	return DialContext(context.Background(), target, opts...) | ||||
| @@ -260,6 +305,32 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * | ||||
| 		conns:  make(map[Address]*addrConn), | ||||
| 	} | ||||
| 	cc.ctx, cc.cancel = context.WithCancel(context.Background()) | ||||
| 	cc.dopts.maxMsgSize = defaultClientMaxMsgSize | ||||
| 	for _, opt := range opts { | ||||
| 		opt(&cc.dopts) | ||||
| 	} | ||||
| 	cc.mkp = cc.dopts.copts.KeepaliveParams | ||||
|  | ||||
| 	if cc.dopts.copts.Dialer == nil { | ||||
| 		cc.dopts.copts.Dialer = newProxyDialer( | ||||
| 			func(ctx context.Context, addr string) (net.Conn, error) { | ||||
| 				return dialContext(ctx, "tcp", addr) | ||||
| 			}, | ||||
| 		) | ||||
| 	} | ||||
|  | ||||
| 	if cc.dopts.copts.UserAgent != "" { | ||||
| 		cc.dopts.copts.UserAgent += " " + grpcUA | ||||
| 	} else { | ||||
| 		cc.dopts.copts.UserAgent = grpcUA | ||||
| 	} | ||||
|  | ||||
| 	if cc.dopts.timeout > 0 { | ||||
| 		var cancel context.CancelFunc | ||||
| 		ctx, cancel = context.WithTimeout(ctx, cc.dopts.timeout) | ||||
| 		defer cancel() | ||||
| 	} | ||||
|  | ||||
| 	defer func() { | ||||
| 		select { | ||||
| 		case <-ctx.Done(): | ||||
| @@ -272,10 +343,17 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * | ||||
| 		} | ||||
| 	}() | ||||
|  | ||||
| 	for _, opt := range opts { | ||||
| 		opt(&cc.dopts) | ||||
| 	if cc.dopts.scChan != nil { | ||||
| 		// Wait for the initial service config. | ||||
| 		select { | ||||
| 		case sc, ok := <-cc.dopts.scChan: | ||||
| 			if ok { | ||||
| 				cc.sc = sc | ||||
| 			} | ||||
| 		case <-ctx.Done(): | ||||
| 			return nil, ctx.Err() | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	// Set defaults. | ||||
| 	if cc.dopts.codec == nil { | ||||
| 		cc.dopts.codec = protoCodec{} | ||||
| @@ -286,21 +364,18 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * | ||||
| 	creds := cc.dopts.copts.TransportCredentials | ||||
| 	if creds != nil && creds.Info().ServerName != "" { | ||||
| 		cc.authority = creds.Info().ServerName | ||||
| 	} else if cc.dopts.insecure && cc.dopts.copts.Authority != "" { | ||||
| 		cc.authority = cc.dopts.copts.Authority | ||||
| 	} else { | ||||
| 		colonPos := strings.LastIndex(target, ":") | ||||
| 		if colonPos == -1 { | ||||
| 			colonPos = len(target) | ||||
| 		} | ||||
| 		cc.authority = target[:colonPos] | ||||
| 		cc.authority = target | ||||
| 	} | ||||
| 	var ok bool | ||||
| 	waitC := make(chan error, 1) | ||||
| 	go func() { | ||||
| 		var addrs []Address | ||||
| 		if cc.dopts.balancer == nil { | ||||
| 			// Connect to target directly if balancer is nil. | ||||
| 			addrs = append(addrs, Address{Addr: target}) | ||||
| 		} else { | ||||
| 		defer close(waitC) | ||||
| 		if cc.dopts.balancer == nil && cc.sc.LB != nil { | ||||
| 			cc.dopts.balancer = cc.sc.LB | ||||
| 		} | ||||
| 		if cc.dopts.balancer != nil { | ||||
| 			var credsClone credentials.TransportCredentials | ||||
| 			if creds != nil { | ||||
| 				credsClone = creds.Clone() | ||||
| @@ -313,29 +388,23 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * | ||||
| 				return | ||||
| 			} | ||||
| 			ch := cc.dopts.balancer.Notify() | ||||
| 			if ch == nil { | ||||
| 				// There is no name resolver installed. | ||||
| 				addrs = append(addrs, Address{Addr: target}) | ||||
| 			} else { | ||||
| 				addrs, ok = <-ch | ||||
| 				if !ok || len(addrs) == 0 { | ||||
| 					waitC <- errNoAddr | ||||
| 					return | ||||
| 			if ch != nil { | ||||
| 				if cc.dopts.block { | ||||
| 					doneChan := make(chan struct{}) | ||||
| 					go cc.lbWatcher(doneChan) | ||||
| 					<-doneChan | ||||
| 				} else { | ||||
| 					go cc.lbWatcher(nil) | ||||
| 				} | ||||
| 			} | ||||
| 		} | ||||
| 		for _, a := range addrs { | ||||
| 			if err := cc.resetAddrConn(a, false, nil); err != nil { | ||||
| 				waitC <- err | ||||
| 				return | ||||
| 			} | ||||
| 		} | ||||
| 		close(waitC) | ||||
| 		// No balancer, or no resolver within the balancer.  Connect directly. | ||||
| 		if err := cc.resetAddrConn(Address{Addr: target}, cc.dopts.block, nil); err != nil { | ||||
| 			waitC <- err | ||||
| 			return | ||||
| 		} | ||||
| 	}() | ||||
| 	var timeoutCh <-chan time.Time | ||||
| 	if cc.dopts.timeout > 0 { | ||||
| 		timeoutCh = time.After(cc.dopts.timeout) | ||||
| 	} | ||||
| 	select { | ||||
| 	case <-ctx.Done(): | ||||
| 		return nil, ctx.Err() | ||||
| @@ -343,14 +412,12 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * | ||||
| 		if err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
| 	case <-timeoutCh: | ||||
| 		return nil, ErrClientConnTimeout | ||||
| 	} | ||||
| 	// If balancer is nil or balancer.Notify() is nil, ok will be false here. | ||||
| 	// The lbWatcher goroutine will not be created. | ||||
| 	if ok { | ||||
| 		go cc.lbWatcher() | ||||
|  | ||||
| 	if cc.dopts.scChan != nil { | ||||
| 		go cc.scWatcher() | ||||
| 	} | ||||
|  | ||||
| 	return cc, nil | ||||
| } | ||||
|  | ||||
| @@ -397,10 +464,16 @@ type ClientConn struct { | ||||
| 	dopts     dialOptions | ||||
|  | ||||
| 	mu    sync.RWMutex | ||||
| 	sc    ServiceConfig | ||||
| 	conns map[Address]*addrConn | ||||
| 	// Keepalive parameter can be updated if a GoAway is received. | ||||
| 	mkp keepalive.ClientParameters | ||||
| } | ||||
|  | ||||
| func (cc *ClientConn) lbWatcher() { | ||||
| // lbWatcher watches the Notify channel of the balancer in cc and manages | ||||
| // connections accordingly.  If doneChan is not nil, it is closed after the | ||||
| // first successful connection is made. | ||||
| func (cc *ClientConn) lbWatcher(doneChan chan struct{}) { | ||||
| 	for addrs := range cc.dopts.balancer.Notify() { | ||||
| 		var ( | ||||
| 			add []Address   // Addresses need to setup connections. | ||||
| @@ -427,7 +500,15 @@ func (cc *ClientConn) lbWatcher() { | ||||
| 		} | ||||
| 		cc.mu.Unlock() | ||||
| 		for _, a := range add { | ||||
| 			cc.resetAddrConn(a, true, nil) | ||||
| 			if doneChan != nil { | ||||
| 				err := cc.resetAddrConn(a, true, nil) | ||||
| 				if err == nil { | ||||
| 					close(doneChan) | ||||
| 					doneChan = nil | ||||
| 				} | ||||
| 			} else { | ||||
| 				cc.resetAddrConn(a, false, nil) | ||||
| 			} | ||||
| 		} | ||||
| 		for _, c := range del { | ||||
| 			c.tearDown(errConnDrain) | ||||
| @@ -435,15 +516,36 @@ func (cc *ClientConn) lbWatcher() { | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (cc *ClientConn) scWatcher() { | ||||
| 	for { | ||||
| 		select { | ||||
| 		case sc, ok := <-cc.dopts.scChan: | ||||
| 			if !ok { | ||||
| 				return | ||||
| 			} | ||||
| 			cc.mu.Lock() | ||||
| 			// TODO: load balance policy runtime change is ignored. | ||||
| 			// We may revist this decision in the future. | ||||
| 			cc.sc = sc | ||||
| 			cc.mu.Unlock() | ||||
| 		case <-cc.ctx.Done(): | ||||
| 			return | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // resetAddrConn creates an addrConn for addr and adds it to cc.conns. | ||||
| // If there is an old addrConn for addr, it will be torn down, using tearDownErr as the reason. | ||||
| // If tearDownErr is nil, errConnDrain will be used instead. | ||||
| func (cc *ClientConn) resetAddrConn(addr Address, skipWait bool, tearDownErr error) error { | ||||
| func (cc *ClientConn) resetAddrConn(addr Address, block bool, tearDownErr error) error { | ||||
| 	ac := &addrConn{ | ||||
| 		cc:    cc, | ||||
| 		addr:  addr, | ||||
| 		dopts: cc.dopts, | ||||
| 	} | ||||
| 	cc.mu.RLock() | ||||
| 	ac.dopts.copts.KeepaliveParams = cc.mkp | ||||
| 	cc.mu.RUnlock() | ||||
| 	ac.ctx, ac.cancel = context.WithCancel(cc.ctx) | ||||
| 	ac.stateCV = sync.NewCond(&ac.mu) | ||||
| 	if EnableTracing { | ||||
| @@ -488,8 +590,7 @@ func (cc *ClientConn) resetAddrConn(addr Address, skipWait bool, tearDownErr err | ||||
| 			stale.tearDown(tearDownErr) | ||||
| 		} | ||||
| 	} | ||||
| 	// skipWait may overwrite the decision in ac.dopts.block. | ||||
| 	if ac.dopts.block && !skipWait { | ||||
| 	if block { | ||||
| 		if err := ac.resetTransport(false); err != nil { | ||||
| 			if err != errConnClosing { | ||||
| 				// Tear down ac and delete it from cc.conns. | ||||
| @@ -522,6 +623,14 @@ func (cc *ClientConn) resetAddrConn(addr Address, skipWait bool, tearDownErr err | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| // TODO: Avoid the locking here. | ||||
| func (cc *ClientConn) getMethodConfig(method string) (m MethodConfig, ok bool) { | ||||
| 	cc.mu.RLock() | ||||
| 	defer cc.mu.RUnlock() | ||||
| 	m, ok = cc.sc.Methods[method] | ||||
| 	return | ||||
| } | ||||
|  | ||||
| func (cc *ClientConn) getTransport(ctx context.Context, opts BalancerGetOptions) (transport.ClientTransport, func(), error) { | ||||
| 	var ( | ||||
| 		ac  *addrConn | ||||
| @@ -560,6 +669,7 @@ func (cc *ClientConn) getTransport(ctx context.Context, opts BalancerGetOptions) | ||||
| 	} | ||||
| 	if !ok { | ||||
| 		if put != nil { | ||||
| 			updateRPCInfoInContext(ctx, rpcInfo{bytesSent: false, bytesReceived: false}) | ||||
| 			put() | ||||
| 		} | ||||
| 		return nil, nil, errConnClosing | ||||
| @@ -567,6 +677,7 @@ func (cc *ClientConn) getTransport(ctx context.Context, opts BalancerGetOptions) | ||||
| 	t, err := ac.wait(ctx, cc.dopts.balancer != nil, !opts.BlockingWait) | ||||
| 	if err != nil { | ||||
| 		if put != nil { | ||||
| 			updateRPCInfoInContext(ctx, rpcInfo{bytesSent: false, bytesReceived: false}) | ||||
| 			put() | ||||
| 		} | ||||
| 		return nil, nil, err | ||||
| @@ -618,6 +729,20 @@ type addrConn struct { | ||||
| 	tearDownErr error | ||||
| } | ||||
|  | ||||
| // adjustParams updates parameters used to create transports upon | ||||
| // receiving a GoAway. | ||||
| func (ac *addrConn) adjustParams(r transport.GoAwayReason) { | ||||
| 	switch r { | ||||
| 	case transport.TooManyPings: | ||||
| 		v := 2 * ac.dopts.copts.KeepaliveParams.Time | ||||
| 		ac.cc.mu.Lock() | ||||
| 		if v > ac.cc.mkp.Time { | ||||
| 			ac.cc.mkp.Time = v | ||||
| 		} | ||||
| 		ac.cc.mu.Unlock() | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // printf records an event in ac's event log, unless ac has been closed. | ||||
| // REQUIRES ac.mu is held. | ||||
| func (ac *addrConn) printf(format string, a ...interface{}) { | ||||
| @@ -702,6 +827,8 @@ func (ac *addrConn) resetTransport(closeTransport bool) error { | ||||
| 			Metadata: ac.addr.Metadata, | ||||
| 		} | ||||
| 		newTransport, err := transport.NewClientTransport(ctx, sinfo, ac.dopts.copts) | ||||
| 		// Don't call cancel in success path due to a race in Go 1.6: | ||||
| 		// https://github.com/golang/go/issues/15078. | ||||
| 		if err != nil { | ||||
| 			cancel() | ||||
|  | ||||
| @@ -772,6 +899,7 @@ func (ac *addrConn) transportMonitor() { | ||||
| 			} | ||||
| 			return | ||||
| 		case <-t.GoAway(): | ||||
| 			ac.adjustParams(t.GetGoAwayReason()) | ||||
| 			// If GoAway happens without any network I/O error, ac is closed without shutting down the | ||||
| 			// underlying transport (the transport will be closed when all the pending RPCs finished or | ||||
| 			// failed.). | ||||
| @@ -780,9 +908,9 @@ func (ac *addrConn) transportMonitor() { | ||||
| 			// In both cases, a new ac is created. | ||||
| 			select { | ||||
| 			case <-t.Error(): | ||||
| 				ac.cc.resetAddrConn(ac.addr, true, errNetworkIO) | ||||
| 				ac.cc.resetAddrConn(ac.addr, false, errNetworkIO) | ||||
| 			default: | ||||
| 				ac.cc.resetAddrConn(ac.addr, true, errConnDrain) | ||||
| 				ac.cc.resetAddrConn(ac.addr, false, errConnDrain) | ||||
| 			} | ||||
| 			return | ||||
| 		case <-t.Error(): | ||||
| @@ -791,7 +919,8 @@ func (ac *addrConn) transportMonitor() { | ||||
| 				t.Close() | ||||
| 				return | ||||
| 			case <-t.GoAway(): | ||||
| 				ac.cc.resetAddrConn(ac.addr, true, errNetworkIO) | ||||
| 				ac.adjustParams(t.GetGoAwayReason()) | ||||
| 				ac.cc.resetAddrConn(ac.addr, false, errNetworkIO) | ||||
| 				return | ||||
| 			default: | ||||
| 			} | ||||
|   | ||||
							
								
								
									
										118
									
								
								vendor/google.golang.org/grpc/codec.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										118
									
								
								vendor/google.golang.org/grpc/codec.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,118 @@ | ||||
| /* | ||||
| * | ||||
|  * Copyright 2014, Google Inc. | ||||
|  * All rights reserved. | ||||
|  * | ||||
|  * Redistribution and use in source and binary forms, with or without | ||||
|  * modification, are permitted provided that the following conditions are | ||||
|  * met: | ||||
|  * | ||||
|  *     * Redistributions of source code must retain the above copyright | ||||
|  * notice, this list of conditions and the following disclaimer. | ||||
|  *     * Redistributions in binary form must reproduce the above | ||||
|  * copyright notice, this list of conditions and the following disclaimer | ||||
|  * in the documentation and/or other materials provided with the | ||||
|  * distribution. | ||||
|  *     * Neither the name of Google Inc. nor the names of its | ||||
|  * contributors may be used to endorse or promote products derived from | ||||
|  * this software without specific prior written permission. | ||||
|  * | ||||
|  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||||
|  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||||
|  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||||
|  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||||
|  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||||
|  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||||
|  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||||
|  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||||
|  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||||
|  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||||
|  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||||
|  * | ||||
| */ | ||||
|  | ||||
| package grpc | ||||
|  | ||||
| import ( | ||||
| 	"math" | ||||
| 	"sync" | ||||
|  | ||||
| 	"github.com/golang/protobuf/proto" | ||||
| ) | ||||
|  | ||||
| // Codec defines the interface gRPC uses to encode and decode messages. | ||||
| // Note that implementations of this interface must be thread safe; | ||||
| // a Codec's methods can be called from concurrent goroutines. | ||||
| type Codec interface { | ||||
| 	// Marshal returns the wire format of v. | ||||
| 	Marshal(v interface{}) ([]byte, error) | ||||
| 	// Unmarshal parses the wire format into v. | ||||
| 	Unmarshal(data []byte, v interface{}) error | ||||
| 	// String returns the name of the Codec implementation. The returned | ||||
| 	// string will be used as part of content type in transmission. | ||||
| 	String() string | ||||
| } | ||||
|  | ||||
| // protoCodec is a Codec implementation with protobuf. It is the default codec for gRPC. | ||||
| type protoCodec struct { | ||||
| } | ||||
|  | ||||
| type cachedProtoBuffer struct { | ||||
| 	lastMarshaledSize uint32 | ||||
| 	proto.Buffer | ||||
| } | ||||
|  | ||||
| func capToMaxInt32(val int) uint32 { | ||||
| 	if val > math.MaxInt32 { | ||||
| 		return uint32(math.MaxInt32) | ||||
| 	} | ||||
| 	return uint32(val) | ||||
| } | ||||
|  | ||||
| func (p protoCodec) marshal(v interface{}, cb *cachedProtoBuffer) ([]byte, error) { | ||||
| 	protoMsg := v.(proto.Message) | ||||
| 	newSlice := make([]byte, 0, cb.lastMarshaledSize) | ||||
|  | ||||
| 	cb.SetBuf(newSlice) | ||||
| 	cb.Reset() | ||||
| 	if err := cb.Marshal(protoMsg); err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	out := cb.Bytes() | ||||
| 	cb.lastMarshaledSize = capToMaxInt32(len(out)) | ||||
| 	return out, nil | ||||
| } | ||||
|  | ||||
| func (p protoCodec) Marshal(v interface{}) ([]byte, error) { | ||||
| 	cb := protoBufferPool.Get().(*cachedProtoBuffer) | ||||
| 	out, err := p.marshal(v, cb) | ||||
|  | ||||
| 	// put back buffer and lose the ref to the slice | ||||
| 	cb.SetBuf(nil) | ||||
| 	protoBufferPool.Put(cb) | ||||
| 	return out, err | ||||
| } | ||||
|  | ||||
| func (p protoCodec) Unmarshal(data []byte, v interface{}) error { | ||||
| 	cb := protoBufferPool.Get().(*cachedProtoBuffer) | ||||
| 	cb.SetBuf(data) | ||||
| 	err := cb.Unmarshal(v.(proto.Message)) | ||||
| 	cb.SetBuf(nil) | ||||
| 	protoBufferPool.Put(cb) | ||||
| 	return err | ||||
| } | ||||
|  | ||||
| func (protoCodec) String() string { | ||||
| 	return "proto" | ||||
| } | ||||
|  | ||||
| var ( | ||||
| 	protoBufferPool = &sync.Pool{ | ||||
| 		New: func() interface{} { | ||||
| 			return &cachedProtoBuffer{ | ||||
| 				Buffer:            proto.Buffer{}, | ||||
| 				lastMarshaledSize: 16, | ||||
| 			} | ||||
| 		}, | ||||
| 	} | ||||
| ) | ||||
							
								
								
									
										8
									
								
								vendor/google.golang.org/grpc/credentials/credentials.go
									
									
									
										generated
									
									
										vendored
									
									
								
							
							
						
						
									
										8
									
								
								vendor/google.golang.org/grpc/credentials/credentials.go
									
									
									
										generated
									
									
										vendored
									
									
								
							| @@ -102,6 +102,10 @@ type TransportCredentials interface { | ||||
| 	// authentication protocol on rawConn for clients. It returns the authenticated | ||||
| 	// connection and the corresponding auth information about the connection. | ||||
| 	// Implementations must use the provided context to implement timely cancellation. | ||||
| 	// gRPC will try to reconnect if the error returned is a temporary error | ||||
| 	// (io.EOF, context.DeadlineExceeded or err.Temporary() == true). | ||||
| 	// If the returned error is a wrapper error, implementations should make sure that | ||||
| 	// the error implements Temporary() to have the correct retry behaviors. | ||||
| 	ClientHandshake(context.Context, string, net.Conn) (net.Conn, AuthInfo, error) | ||||
| 	// ServerHandshake does the authentication handshake for servers. It returns | ||||
| 	// the authenticated connection and the corresponding auth information about | ||||
| @@ -165,9 +169,7 @@ func (c *tlsCreds) ClientHandshake(ctx context.Context, addr string, rawConn net | ||||
| 	case <-ctx.Done(): | ||||
| 		return nil, nil, ctx.Err() | ||||
| 	} | ||||
| 	// TODO(zhaoq): Omit the auth info for client now. It is more for | ||||
| 	// information than anything else. | ||||
| 	return conn, nil, nil | ||||
| 	return conn, TLSInfo{conn.ConnectionState()}, nil | ||||
| } | ||||
|  | ||||
| func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) { | ||||
|   | ||||
							
								
								
									
										3
									
								
								vendor/google.golang.org/grpc/credentials/credentials_util_go17.go
									
									
									
										generated
									
									
										vendored
									
									
								
							
							
						
						
									
										3
									
								
								vendor/google.golang.org/grpc/credentials/credentials_util_go17.go
									
									
									
										generated
									
									
										vendored
									
									
								
							| @@ -1,4 +1,5 @@ | ||||
| // +build go1.7 | ||||
| // +build !go1.8 | ||||
|  | ||||
| /* | ||||
|  * | ||||
| @@ -44,8 +45,6 @@ import ( | ||||
| // contains a mutex and must not be copied. | ||||
| // | ||||
| // If cfg is nil, a new zero tls.Config is returned. | ||||
| // | ||||
| // TODO replace this function with official clone function. | ||||
| func cloneTLSConfig(cfg *tls.Config) *tls.Config { | ||||
| 	if cfg == nil { | ||||
| 		return &tls.Config{} | ||||
|   | ||||
							
								
								
									
										53
									
								
								vendor/google.golang.org/grpc/credentials/credentials_util_go18.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										53
									
								
								vendor/google.golang.org/grpc/credentials/credentials_util_go18.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,53 @@ | ||||
| // +build go1.8 | ||||
|  | ||||
| /* | ||||
|  * | ||||
|  * Copyright 2017, Google Inc. | ||||
|  * All rights reserved. | ||||
|  * | ||||
|  * Redistribution and use in source and binary forms, with or without | ||||
|  * modification, are permitted provided that the following conditions are | ||||
|  * met: | ||||
|  * | ||||
|  *     * Redistributions of source code must retain the above copyright | ||||
|  * notice, this list of conditions and the following disclaimer. | ||||
|  *     * Redistributions in binary form must reproduce the above | ||||
|  * copyright notice, this list of conditions and the following disclaimer | ||||
|  * in the documentation and/or other materials provided with the | ||||
|  * distribution. | ||||
|  *     * Neither the name of Google Inc. nor the names of its | ||||
|  * contributors may be used to endorse or promote products derived from | ||||
|  * this software without specific prior written permission. | ||||
|  * | ||||
|  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||||
|  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||||
|  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||||
|  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||||
|  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||||
|  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||||
|  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||||
|  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||||
|  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||||
|  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||||
|  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||||
|  * | ||||
|  */ | ||||
|  | ||||
| package credentials | ||||
|  | ||||
| import ( | ||||
| 	"crypto/tls" | ||||
| ) | ||||
|  | ||||
| // cloneTLSConfig returns a shallow clone of the exported | ||||
| // fields of cfg, ignoring the unexported sync.Once, which | ||||
| // contains a mutex and must not be copied. | ||||
| // | ||||
| // If cfg is nil, a new zero tls.Config is returned. | ||||
| func cloneTLSConfig(cfg *tls.Config) *tls.Config { | ||||
| 	if cfg == nil { | ||||
| 		return &tls.Config{} | ||||
| 	} | ||||
|  | ||||
| 	return cfg.Clone() | ||||
| } | ||||
							
								
								
									
										2
									
								
								vendor/google.golang.org/grpc/credentials/credentials_util_pre_go17.go
									
									
									
										generated
									
									
										vendored
									
									
								
							
							
						
						
									
										2
									
								
								vendor/google.golang.org/grpc/credentials/credentials_util_pre_go17.go
									
									
									
										generated
									
									
										vendored
									
									
								
							| @@ -44,8 +44,6 @@ import ( | ||||
| // contains a mutex and must not be copied. | ||||
| // | ||||
| // If cfg is nil, a new zero tls.Config is returned. | ||||
| // | ||||
| // TODO replace this function with official clone function. | ||||
| func cloneTLSConfig(cfg *tls.Config) *tls.Config { | ||||
| 	if cfg == nil { | ||||
| 		return &tls.Config{} | ||||
|   | ||||
							
								
								
									
										56
									
								
								vendor/google.golang.org/grpc/go16.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										56
									
								
								vendor/google.golang.org/grpc/go16.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,56 @@ | ||||
| // +build go1.6,!go1.7 | ||||
|  | ||||
| /* | ||||
|  * Copyright 2016, Google Inc. | ||||
|  * All rights reserved. | ||||
|  * | ||||
|  * Redistribution and use in source and binary forms, with or without | ||||
|  * modification, are permitted provided that the following conditions are | ||||
|  * met: | ||||
|  * | ||||
|  *     * Redistributions of source code must retain the above copyright | ||||
|  * notice, this list of conditions and the following disclaimer. | ||||
|  *     * Redistributions in binary form must reproduce the above | ||||
|  * copyright notice, this list of conditions and the following disclaimer | ||||
|  * in the documentation and/or other materials provided with the | ||||
|  * distribution. | ||||
|  *     * Neither the name of Google Inc. nor the names of its | ||||
|  * contributors may be used to endorse or promote products derived from | ||||
|  * this software without specific prior written permission. | ||||
|  * | ||||
|  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||||
|  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||||
|  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||||
|  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||||
|  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||||
|  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||||
|  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||||
|  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||||
|  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||||
|  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||||
|  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||||
|  * | ||||
|  */ | ||||
|  | ||||
| package grpc | ||||
|  | ||||
| import ( | ||||
| 	"fmt" | ||||
| 	"net" | ||||
| 	"net/http" | ||||
|  | ||||
| 	"golang.org/x/net/context" | ||||
| ) | ||||
|  | ||||
| // dialContext connects to the address on the named network. | ||||
| func dialContext(ctx context.Context, network, address string) (net.Conn, error) { | ||||
| 	return (&net.Dialer{Cancel: ctx.Done()}).Dial(network, address) | ||||
| } | ||||
|  | ||||
| func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error { | ||||
| 	req.Cancel = ctx.Done() | ||||
| 	if err := req.Write(conn); err != nil { | ||||
| 		return fmt.Errorf("failed to write the HTTP request: %v", err) | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
| @@ -1,4 +1,4 @@ | ||||
| // +build !go1.6 | ||||
| // +build go1.7 | ||||
| 
 | ||||
| /* | ||||
|  * Copyright 2016, Google Inc. | ||||
| @@ -32,20 +32,24 @@ | ||||
|  * | ||||
|  */ | ||||
| 
 | ||||
| package transport | ||||
| package grpc | ||||
| 
 | ||||
| import ( | ||||
| 	"net" | ||||
| 	"time" | ||||
| 	"net/http" | ||||
| 
 | ||||
| 	"golang.org/x/net/context" | ||||
| ) | ||||
| 
 | ||||
| // dialContext connects to the address on the named network. | ||||
| func dialContext(ctx context.Context, network, address string) (net.Conn, error) { | ||||
| 	var dialer net.Dialer | ||||
| 	if deadline, ok := ctx.Deadline(); ok { | ||||
| 		dialer.Timeout = deadline.Sub(time.Now()) | ||||
| 	} | ||||
| 	return dialer.Dial(network, address) | ||||
| 	return (&net.Dialer{}).DialContext(ctx, network, address) | ||||
| } | ||||
| 
 | ||||
| func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error { | ||||
| 	req = req.WithContext(ctx) | ||||
| 	if err := req.Write(conn); err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
							
								
								
									
										749
									
								
								vendor/google.golang.org/grpc/grpclb.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										749
									
								
								vendor/google.golang.org/grpc/grpclb.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,749 @@ | ||||
| /* | ||||
|  * | ||||
|  * Copyright 2016, Google Inc. | ||||
|  * All rights reserved. | ||||
|  * | ||||
|  * Redistribution and use in source and binary forms, with or without | ||||
|  * modification, are permitted provided that the following conditions are | ||||
|  * met: | ||||
|  * | ||||
|  *     * Redistributions of source code must retain the above copyright | ||||
|  * notice, this list of conditions and the following disclaimer. | ||||
|  *     * Redistributions in binary form must reproduce the above | ||||
|  * copyright notice, this list of conditions and the following disclaimer | ||||
|  * in the documentation and/or other materials provided with the | ||||
|  * distribution. | ||||
|  *     * Neither the name of Google Inc. nor the names of its | ||||
|  * contributors may be used to endorse or promote products derived from | ||||
|  * this software without specific prior written permission. | ||||
|  * | ||||
|  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||||
|  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||||
|  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||||
|  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||||
|  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||||
|  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||||
|  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||||
|  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||||
|  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||||
|  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||||
|  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||||
|  * | ||||
|  */ | ||||
|  | ||||
| package grpc | ||||
|  | ||||
| import ( | ||||
| 	"errors" | ||||
| 	"fmt" | ||||
| 	"math/rand" | ||||
| 	"net" | ||||
| 	"sync" | ||||
| 	"time" | ||||
|  | ||||
| 	"golang.org/x/net/context" | ||||
| 	"google.golang.org/grpc/codes" | ||||
| 	lbpb "google.golang.org/grpc/grpclb/grpc_lb_v1" | ||||
| 	"google.golang.org/grpc/grpclog" | ||||
| 	"google.golang.org/grpc/metadata" | ||||
| 	"google.golang.org/grpc/naming" | ||||
| ) | ||||
|  | ||||
| // Client API for LoadBalancer service. | ||||
| // Mostly copied from generated pb.go file. | ||||
| // To avoid circular dependency. | ||||
| type loadBalancerClient struct { | ||||
| 	cc *ClientConn | ||||
| } | ||||
|  | ||||
| func (c *loadBalancerClient) BalanceLoad(ctx context.Context, opts ...CallOption) (*balanceLoadClientStream, error) { | ||||
| 	desc := &StreamDesc{ | ||||
| 		StreamName:    "BalanceLoad", | ||||
| 		ServerStreams: true, | ||||
| 		ClientStreams: true, | ||||
| 	} | ||||
| 	stream, err := NewClientStream(ctx, desc, c.cc, "/grpc.lb.v1.LoadBalancer/BalanceLoad", opts...) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	x := &balanceLoadClientStream{stream} | ||||
| 	return x, nil | ||||
| } | ||||
|  | ||||
| type balanceLoadClientStream struct { | ||||
| 	ClientStream | ||||
| } | ||||
|  | ||||
| func (x *balanceLoadClientStream) Send(m *lbpb.LoadBalanceRequest) error { | ||||
| 	return x.ClientStream.SendMsg(m) | ||||
| } | ||||
|  | ||||
| func (x *balanceLoadClientStream) Recv() (*lbpb.LoadBalanceResponse, error) { | ||||
| 	m := new(lbpb.LoadBalanceResponse) | ||||
| 	if err := x.ClientStream.RecvMsg(m); err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	return m, nil | ||||
| } | ||||
|  | ||||
| // AddressType indicates the address type returned by name resolution. | ||||
| type AddressType uint8 | ||||
|  | ||||
| const ( | ||||
| 	// Backend indicates the server is a backend server. | ||||
| 	Backend AddressType = iota | ||||
| 	// GRPCLB indicates the server is a grpclb load balancer. | ||||
| 	GRPCLB | ||||
| ) | ||||
|  | ||||
| // AddrMetadataGRPCLB contains the information the name resolution for grpclb should provide. The | ||||
| // name resolver used by grpclb balancer is required to provide this type of metadata in | ||||
| // its address updates. | ||||
| type AddrMetadataGRPCLB struct { | ||||
| 	// AddrType is the type of server (grpc load balancer or backend). | ||||
| 	AddrType AddressType | ||||
| 	// ServerName is the name of the grpc load balancer. Used for authentication. | ||||
| 	ServerName string | ||||
| } | ||||
|  | ||||
| // NewGRPCLBBalancer creates a grpclb load balancer. | ||||
| func NewGRPCLBBalancer(r naming.Resolver) Balancer { | ||||
| 	return &balancer{ | ||||
| 		r: r, | ||||
| 	} | ||||
| } | ||||
|  | ||||
| type remoteBalancerInfo struct { | ||||
| 	addr string | ||||
| 	// the server name used for authentication with the remote LB server. | ||||
| 	name string | ||||
| } | ||||
|  | ||||
| // grpclbAddrInfo consists of the information of a backend server. | ||||
| type grpclbAddrInfo struct { | ||||
| 	addr      Address | ||||
| 	connected bool | ||||
| 	// dropForRateLimiting indicates whether this particular request should be | ||||
| 	// dropped by the client for rate limiting. | ||||
| 	dropForRateLimiting bool | ||||
| 	// dropForLoadBalancing indicates whether this particular request should be | ||||
| 	// dropped by the client for load balancing. | ||||
| 	dropForLoadBalancing bool | ||||
| } | ||||
|  | ||||
| type balancer struct { | ||||
| 	r        naming.Resolver | ||||
| 	target   string | ||||
| 	mu       sync.Mutex | ||||
| 	seq      int // a sequence number to make sure addrCh does not get stale addresses. | ||||
| 	w        naming.Watcher | ||||
| 	addrCh   chan []Address | ||||
| 	rbs      []remoteBalancerInfo | ||||
| 	addrs    []*grpclbAddrInfo | ||||
| 	next     int | ||||
| 	waitCh   chan struct{} | ||||
| 	done     bool | ||||
| 	expTimer *time.Timer | ||||
| 	rand     *rand.Rand | ||||
|  | ||||
| 	clientStats lbpb.ClientStats | ||||
| } | ||||
|  | ||||
| func (b *balancer) watchAddrUpdates(w naming.Watcher, ch chan []remoteBalancerInfo) error { | ||||
| 	updates, err := w.Next() | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 	b.mu.Lock() | ||||
| 	defer b.mu.Unlock() | ||||
| 	if b.done { | ||||
| 		return ErrClientConnClosing | ||||
| 	} | ||||
| 	for _, update := range updates { | ||||
| 		switch update.Op { | ||||
| 		case naming.Add: | ||||
| 			var exist bool | ||||
| 			for _, v := range b.rbs { | ||||
| 				// TODO: Is the same addr with different server name a different balancer? | ||||
| 				if update.Addr == v.addr { | ||||
| 					exist = true | ||||
| 					break | ||||
| 				} | ||||
| 			} | ||||
| 			if exist { | ||||
| 				continue | ||||
| 			} | ||||
| 			md, ok := update.Metadata.(*AddrMetadataGRPCLB) | ||||
| 			if !ok { | ||||
| 				// TODO: Revisit the handling here and may introduce some fallback mechanism. | ||||
| 				grpclog.Printf("The name resolution contains unexpected metadata %v", update.Metadata) | ||||
| 				continue | ||||
| 			} | ||||
| 			switch md.AddrType { | ||||
| 			case Backend: | ||||
| 				// TODO: Revisit the handling here and may introduce some fallback mechanism. | ||||
| 				grpclog.Printf("The name resolution does not give grpclb addresses") | ||||
| 				continue | ||||
| 			case GRPCLB: | ||||
| 				b.rbs = append(b.rbs, remoteBalancerInfo{ | ||||
| 					addr: update.Addr, | ||||
| 					name: md.ServerName, | ||||
| 				}) | ||||
| 			default: | ||||
| 				grpclog.Printf("Received unknow address type %d", md.AddrType) | ||||
| 				continue | ||||
| 			} | ||||
| 		case naming.Delete: | ||||
| 			for i, v := range b.rbs { | ||||
| 				if update.Addr == v.addr { | ||||
| 					copy(b.rbs[i:], b.rbs[i+1:]) | ||||
| 					b.rbs = b.rbs[:len(b.rbs)-1] | ||||
| 					break | ||||
| 				} | ||||
| 			} | ||||
| 		default: | ||||
| 			grpclog.Println("Unknown update.Op ", update.Op) | ||||
| 		} | ||||
| 	} | ||||
| 	// TODO: Fall back to the basic round-robin load balancing if the resulting address is | ||||
| 	// not a load balancer. | ||||
| 	select { | ||||
| 	case <-ch: | ||||
| 	default: | ||||
| 	} | ||||
| 	ch <- b.rbs | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func (b *balancer) serverListExpire(seq int) { | ||||
| 	b.mu.Lock() | ||||
| 	defer b.mu.Unlock() | ||||
| 	// TODO: gRPC interanls do not clear the connections when the server list is stale. | ||||
| 	// This means RPCs will keep using the existing server list until b receives new | ||||
| 	// server list even though the list is expired. Revisit this behavior later. | ||||
| 	if b.done || seq < b.seq { | ||||
| 		return | ||||
| 	} | ||||
| 	b.next = 0 | ||||
| 	b.addrs = nil | ||||
| 	// Ask grpc internals to close all the corresponding connections. | ||||
| 	b.addrCh <- nil | ||||
| } | ||||
|  | ||||
| func convertDuration(d *lbpb.Duration) time.Duration { | ||||
| 	if d == nil { | ||||
| 		return 0 | ||||
| 	} | ||||
| 	return time.Duration(d.Seconds)*time.Second + time.Duration(d.Nanos)*time.Nanosecond | ||||
| } | ||||
|  | ||||
| func (b *balancer) processServerList(l *lbpb.ServerList, seq int) { | ||||
| 	if l == nil { | ||||
| 		return | ||||
| 	} | ||||
| 	servers := l.GetServers() | ||||
| 	expiration := convertDuration(l.GetExpirationInterval()) | ||||
| 	var ( | ||||
| 		sl    []*grpclbAddrInfo | ||||
| 		addrs []Address | ||||
| 	) | ||||
| 	for _, s := range servers { | ||||
| 		md := metadata.Pairs("lb-token", s.LoadBalanceToken) | ||||
| 		addr := Address{ | ||||
| 			Addr:     fmt.Sprintf("%s:%d", net.IP(s.IpAddress), s.Port), | ||||
| 			Metadata: &md, | ||||
| 		} | ||||
| 		sl = append(sl, &grpclbAddrInfo{ | ||||
| 			addr:                 addr, | ||||
| 			dropForRateLimiting:  s.DropForRateLimiting, | ||||
| 			dropForLoadBalancing: s.DropForLoadBalancing, | ||||
| 		}) | ||||
| 		addrs = append(addrs, addr) | ||||
| 	} | ||||
| 	b.mu.Lock() | ||||
| 	defer b.mu.Unlock() | ||||
| 	if b.done || seq < b.seq { | ||||
| 		return | ||||
| 	} | ||||
| 	if len(sl) > 0 { | ||||
| 		// reset b.next to 0 when replacing the server list. | ||||
| 		b.next = 0 | ||||
| 		b.addrs = sl | ||||
| 		b.addrCh <- addrs | ||||
| 		if b.expTimer != nil { | ||||
| 			b.expTimer.Stop() | ||||
| 			b.expTimer = nil | ||||
| 		} | ||||
| 		if expiration > 0 { | ||||
| 			b.expTimer = time.AfterFunc(expiration, func() { | ||||
| 				b.serverListExpire(seq) | ||||
| 			}) | ||||
| 		} | ||||
| 	} | ||||
| 	return | ||||
| } | ||||
|  | ||||
// sendLoadReport periodically pushes the accumulated client-side call
// stats to the remote balancer over stream s, once per interval, until
// done is closed. Each report clears the accumulated stats, so the
// counters sent are deltas since the previous report.
func (b *balancer) sendLoadReport(s *balanceLoadClientStream, interval time.Duration, done <-chan struct{}) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
		case <-done:
			return
		}
		// Snapshot and clear the stats under the lock; the Send below
		// runs outside the critical section.
		b.mu.Lock()
		stats := b.clientStats
		b.clientStats = lbpb.ClientStats{} // Clear the stats.
		b.mu.Unlock()
		t := time.Now()
		stats.Timestamp = &lbpb.Timestamp{
			Seconds: t.Unix(),
			Nanos:   int32(t.Nanosecond()),
		}
		// Stop reporting if the stream is broken.
		if err := s.Send(&lbpb.LoadBalanceRequest{
			LoadBalanceRequestType: &lbpb.LoadBalanceRequest_ClientStats{
				ClientStats: &stats,
			},
		}); err != nil {
			return
		}
	}
}
|  | ||||
// callRemoteBalancer drives one BalanceLoad stream against the remote
// balancer lbc: it sends the initial request, validates the initial
// response, optionally starts periodic load reporting, and then receives
// server lists until the stream breaks. The returned retry flag tells the
// caller whether reconnecting to a balancer is worthwhile (true for
// transient stream errors, false for terminal conditions such as a
// missing initial response or a closed balancer).
func (b *balancer) callRemoteBalancer(lbc *loadBalancerClient, seq int) (retry bool) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	stream, err := lbc.BalanceLoad(ctx)
	if err != nil {
		grpclog.Printf("Failed to perform RPC to the remote balancer %v", err)
		return
	}
	b.mu.Lock()
	if b.done {
		b.mu.Unlock()
		return
	}
	b.mu.Unlock()
	initReq := &lbpb.LoadBalanceRequest{
		LoadBalanceRequestType: &lbpb.LoadBalanceRequest_InitialRequest{
			InitialRequest: &lbpb.InitialLoadBalanceRequest{
				Name: b.target,
			},
		},
	}
	if err := stream.Send(initReq); err != nil {
		// TODO: backoff on retry?
		return true
	}
	reply, err := stream.Recv()
	if err != nil {
		// TODO: backoff on retry?
		return true
	}
	initResp := reply.GetInitialResponse()
	if initResp == nil {
		grpclog.Println("Failed to receive the initial response from the remote balancer.")
		return
	}
	// TODO: Support delegation.
	if initResp.LoadBalancerDelegate != "" {
		// delegation
		grpclog.Println("TODO: Delegation is not supported yet.")
		return
	}
	// streamDone is closed on return (via the defer) to stop the
	// load-report goroutine started below.
	streamDone := make(chan struct{})
	defer close(streamDone)
	b.mu.Lock()
	b.clientStats = lbpb.ClientStats{} // Clear client stats.
	b.mu.Unlock()
	if d := convertDuration(initResp.ClientStatsReportInterval); d > 0 {
		go b.sendLoadReport(stream, d, streamDone)
	}
	// Retrieve the server list.
	for {
		reply, err := stream.Recv()
		if err != nil {
			break
		}
		b.mu.Lock()
		if b.done || seq < b.seq {
			// The balancer was closed or superseded by a newer session.
			b.mu.Unlock()
			return
		}
		b.seq++ // tick when receiving a new list of servers.
		seq = b.seq
		b.mu.Unlock()
		if serverList := reply.GetServerList(); serverList != nil {
			b.processServerList(serverList, seq)
		}
	}
	return true
}
|  | ||||
// Start begins load balancing for target. It resolves the remote balancer
// addresses through the configured name resolver and spawns two
// goroutines: one that watches name-resolution updates for balancer
// addresses (feeding balancerAddrsCh), and one that maintains a
// connection to one remote balancer at a time, failing over to the next
// known balancer address when the current connection errors.
func (b *balancer) Start(target string, config BalancerConfig) error {
	b.rand = rand.New(rand.NewSource(time.Now().Unix()))
	// TODO: Fall back to the basic direct connection if there is no name resolver.
	if b.r == nil {
		return errors.New("there is no name resolver installed")
	}
	b.target = target
	b.mu.Lock()
	if b.done {
		b.mu.Unlock()
		return ErrClientConnClosing
	}
	b.addrCh = make(chan []Address)
	w, err := b.r.Resolve(target)
	if err != nil {
		b.mu.Unlock()
		return err
	}
	b.w = w
	b.mu.Unlock()
	balancerAddrsCh := make(chan []remoteBalancerInfo, 1)
	// Spawn a goroutine to monitor the name resolution of remote load balancer.
	go func() {
		for {
			if err := b.watchAddrUpdates(w, balancerAddrsCh); err != nil {
				grpclog.Printf("grpc: the naming watcher stops working due to %v.\n", err)
				close(balancerAddrsCh)
				return
			}
		}
	}()
	// Spawn a goroutine to talk to the remote load balancer.
	go func() {
		var (
			cc *ClientConn
			// ccError is closed when there is an error in the current cc.
			// A new rb should be picked from rbs and connected.
			ccError chan struct{}
			rb      *remoteBalancerInfo
			rbs     []remoteBalancerInfo
			rbIdx   int
		)

		defer func() {
			// On exit, signal ccError (if not already signaled) and close
			// the last balancer connection.
			if ccError != nil {
				select {
				case <-ccError:
				default:
					close(ccError)
				}
			}
			if cc != nil {
				cc.Close()
			}
		}()

		for {
			var ok bool
			select {
			case rbs, ok = <-balancerAddrsCh:
				// A fresh list of balancer addresses from the watcher; a
				// closed channel means the watcher stopped, so exit.
				if !ok {
					return
				}
				foundIdx := -1
				if rb != nil {
					for i, trb := range rbs {
						if trb == *rb {
							foundIdx = i
							break
						}
					}
				}
				if foundIdx >= 0 {
					if foundIdx >= 1 {
						// Move the address in use to the beginning of the list.
						// NOTE(review): b.rbs is swapped here without holding
						// b.mu while the watcher goroutine also mutates b.rbs
						// — confirm this access is serialized.
						b.rbs[0], b.rbs[foundIdx] = b.rbs[foundIdx], b.rbs[0]
						rbIdx = 0
					}
					continue // If found, don't dial new cc.
				} else if len(rbs) > 0 {
					// Pick a random one from the list, instead of always using the first one.
					if l := len(rbs); l > 1 && rb != nil {
						tmpIdx := b.rand.Intn(l - 1)
						b.rbs[0], b.rbs[tmpIdx] = b.rbs[tmpIdx], b.rbs[0]
					}
					rbIdx = 0
					rb = &rbs[0]
				} else {
					// foundIdx < 0 && len(rbs) <= 0.
					rb = nil
				}
			case <-ccError:
				// The current balancer connection failed; advance to the
				// next known balancer address, or give up if exhausted.
				ccError = nil
				if rbIdx < len(rbs)-1 {
					rbIdx++
					rb = &rbs[rbIdx]
				} else {
					rb = nil
				}
			}

			if rb == nil {
				continue
			}

			if cc != nil {
				cc.Close()
			}
			// Talk to the remote load balancer to get the server list.
			var err error
			creds := config.DialCreds
			ccError = make(chan struct{})
			if creds == nil {
				cc, err = Dial(rb.addr, WithInsecure())
			} else {
				if rb.name != "" {
					if err := creds.OverrideServerName(rb.name); err != nil {
						grpclog.Printf("Failed to override the server name in the credentials: %v", err)
						continue
					}
				}
				cc, err = Dial(rb.addr, WithTransportCredentials(creds))
			}
			if err != nil {
				grpclog.Printf("Failed to setup a connection to the remote balancer %v: %v", rb.addr, err)
				close(ccError)
				continue
			}
			b.mu.Lock()
			b.seq++ // tick when getting a new balancer address
			seq := b.seq
			b.next = 0
			b.mu.Unlock()
			// Run the balancer session in its own goroutine; closing
			// ccError tells the loop above to fail over.
			go func(cc *ClientConn, ccError chan struct{}) {
				lbc := &loadBalancerClient{cc}
				b.callRemoteBalancer(lbc, seq)
				cc.Close()
				select {
				case <-ccError:
				default:
					close(ccError)
				}
			}(cc, ccError)
		}
	}()
	return nil
}
|  | ||||
| func (b *balancer) down(addr Address, err error) { | ||||
| 	b.mu.Lock() | ||||
| 	defer b.mu.Unlock() | ||||
| 	for _, a := range b.addrs { | ||||
| 		if addr == a.addr { | ||||
| 			a.connected = false | ||||
| 			break | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
|  | ||||
// Up is called by gRPC internals when a connection to addr becomes ready.
// It marks the address connected and, if addr just became the only usable
// (connected and non-drop) address, wakes up Get() callers blocked on
// b.waitCh. It returns the function to invoke when the connection goes
// down, or nil if the balancer is closed or the address was already
// marked connected.
func (b *balancer) Up(addr Address) func(error) {
	b.mu.Lock()
	defer b.mu.Unlock()
	if b.done {
		return nil
	}
	var cnt int
	for _, a := range b.addrs {
		if a.addr == addr {
			if a.connected {
				return nil
			}
			a.connected = true
		}
		// Count addresses that can actually serve RPCs.
		if a.connected && !a.dropForRateLimiting && !a.dropForLoadBalancing {
			cnt++
		}
	}
	// addr is the only one which is connected. Notify the Get() callers who are blocking.
	if cnt == 1 && b.waitCh != nil {
		close(b.waitCh)
		b.waitCh = nil
	}
	return func(err error) {
		b.down(addr, err)
	}
}
|  | ||||
| func (b *balancer) Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error) { | ||||
| 	var ch chan struct{} | ||||
| 	b.mu.Lock() | ||||
| 	if b.done { | ||||
| 		b.mu.Unlock() | ||||
| 		err = ErrClientConnClosing | ||||
| 		return | ||||
| 	} | ||||
| 	seq := b.seq | ||||
|  | ||||
| 	defer func() { | ||||
| 		if err != nil { | ||||
| 			return | ||||
| 		} | ||||
| 		put = func() { | ||||
| 			s, ok := rpcInfoFromContext(ctx) | ||||
| 			if !ok { | ||||
| 				return | ||||
| 			} | ||||
| 			b.mu.Lock() | ||||
| 			defer b.mu.Unlock() | ||||
| 			if b.done || seq < b.seq { | ||||
| 				return | ||||
| 			} | ||||
| 			b.clientStats.NumCallsFinished++ | ||||
| 			if !s.bytesSent { | ||||
| 				b.clientStats.NumCallsFinishedWithClientFailedToSend++ | ||||
| 			} else if s.bytesReceived { | ||||
| 				b.clientStats.NumCallsFinishedKnownReceived++ | ||||
| 			} | ||||
| 		} | ||||
| 	}() | ||||
|  | ||||
| 	b.clientStats.NumCallsStarted++ | ||||
| 	if len(b.addrs) > 0 { | ||||
| 		if b.next >= len(b.addrs) { | ||||
| 			b.next = 0 | ||||
| 		} | ||||
| 		next := b.next | ||||
| 		for { | ||||
| 			a := b.addrs[next] | ||||
| 			next = (next + 1) % len(b.addrs) | ||||
| 			if a.connected { | ||||
| 				if !a.dropForRateLimiting && !a.dropForLoadBalancing { | ||||
| 					addr = a.addr | ||||
| 					b.next = next | ||||
| 					b.mu.Unlock() | ||||
| 					return | ||||
| 				} | ||||
| 				if !opts.BlockingWait { | ||||
| 					b.next = next | ||||
| 					if a.dropForLoadBalancing { | ||||
| 						b.clientStats.NumCallsFinished++ | ||||
| 						b.clientStats.NumCallsFinishedWithDropForLoadBalancing++ | ||||
| 					} else if a.dropForRateLimiting { | ||||
| 						b.clientStats.NumCallsFinished++ | ||||
| 						b.clientStats.NumCallsFinishedWithDropForRateLimiting++ | ||||
| 					} | ||||
| 					b.mu.Unlock() | ||||
| 					err = Errorf(codes.Unavailable, "%s drops requests", a.addr.Addr) | ||||
| 					return | ||||
| 				} | ||||
| 			} | ||||
| 			if next == b.next { | ||||
| 				// Has iterated all the possible address but none is connected. | ||||
| 				break | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 	if !opts.BlockingWait { | ||||
| 		if len(b.addrs) == 0 { | ||||
| 			b.clientStats.NumCallsFinished++ | ||||
| 			b.clientStats.NumCallsFinishedWithClientFailedToSend++ | ||||
| 			b.mu.Unlock() | ||||
| 			err = Errorf(codes.Unavailable, "there is no address available") | ||||
| 			return | ||||
| 		} | ||||
| 		// Returns the next addr on b.addrs for a failfast RPC. | ||||
| 		addr = b.addrs[b.next].addr | ||||
| 		b.next++ | ||||
| 		b.mu.Unlock() | ||||
| 		return | ||||
| 	} | ||||
| 	// Wait on b.waitCh for non-failfast RPCs. | ||||
| 	if b.waitCh == nil { | ||||
| 		ch = make(chan struct{}) | ||||
| 		b.waitCh = ch | ||||
| 	} else { | ||||
| 		ch = b.waitCh | ||||
| 	} | ||||
| 	b.mu.Unlock() | ||||
| 	for { | ||||
| 		select { | ||||
| 		case <-ctx.Done(): | ||||
| 			b.mu.Lock() | ||||
| 			b.clientStats.NumCallsFinished++ | ||||
| 			b.clientStats.NumCallsFinishedWithClientFailedToSend++ | ||||
| 			b.mu.Unlock() | ||||
| 			err = ctx.Err() | ||||
| 			return | ||||
| 		case <-ch: | ||||
| 			b.mu.Lock() | ||||
| 			if b.done { | ||||
| 				b.clientStats.NumCallsFinished++ | ||||
| 				b.clientStats.NumCallsFinishedWithClientFailedToSend++ | ||||
| 				b.mu.Unlock() | ||||
| 				err = ErrClientConnClosing | ||||
| 				return | ||||
| 			} | ||||
|  | ||||
| 			if len(b.addrs) > 0 { | ||||
| 				if b.next >= len(b.addrs) { | ||||
| 					b.next = 0 | ||||
| 				} | ||||
| 				next := b.next | ||||
| 				for { | ||||
| 					a := b.addrs[next] | ||||
| 					next = (next + 1) % len(b.addrs) | ||||
| 					if a.connected { | ||||
| 						if !a.dropForRateLimiting && !a.dropForLoadBalancing { | ||||
| 							addr = a.addr | ||||
| 							b.next = next | ||||
| 							b.mu.Unlock() | ||||
| 							return | ||||
| 						} | ||||
| 						if !opts.BlockingWait { | ||||
| 							b.next = next | ||||
| 							if a.dropForLoadBalancing { | ||||
| 								b.clientStats.NumCallsFinished++ | ||||
| 								b.clientStats.NumCallsFinishedWithDropForLoadBalancing++ | ||||
| 							} else if a.dropForRateLimiting { | ||||
| 								b.clientStats.NumCallsFinished++ | ||||
| 								b.clientStats.NumCallsFinishedWithDropForRateLimiting++ | ||||
| 							} | ||||
| 							b.mu.Unlock() | ||||
| 							err = Errorf(codes.Unavailable, "drop requests for the addreess %s", a.addr.Addr) | ||||
| 							return | ||||
| 						} | ||||
| 					} | ||||
| 					if next == b.next { | ||||
| 						// Has iterated all the possible address but none is connected. | ||||
| 						break | ||||
| 					} | ||||
| 				} | ||||
| 			} | ||||
| 			// The newly added addr got removed by Down() again. | ||||
| 			if b.waitCh == nil { | ||||
| 				ch = make(chan struct{}) | ||||
| 				b.waitCh = ch | ||||
| 			} else { | ||||
| 				ch = b.waitCh | ||||
| 			} | ||||
| 			b.mu.Unlock() | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
|  | ||||
// Notify returns the channel over which the balancer publishes the set of
// addresses gRPC internals should be connected to. A nil slice sent on
// this channel asks for all existing connections to be torn down (see
// serverListExpire).
func (b *balancer) Notify() <-chan []Address {
	return b.addrCh
}
|  | ||||
// Close shuts the balancer down. It marks the balancer done (making
// subsequent Start/Get calls return ErrClientConnClosing and Up return
// nil), stops the server-list expiration timer, releases any Get()
// callers blocked on waitCh, closes the Notify channel, and stops the
// name-resolution watcher. It always returns nil. The order matters:
// b.done must be set before the channels are closed.
func (b *balancer) Close() error {
	b.mu.Lock()
	defer b.mu.Unlock()
	b.done = true
	if b.expTimer != nil {
		b.expTimer.Stop()
	}
	if b.waitCh != nil {
		close(b.waitCh)
	}
	if b.addrCh != nil {
		close(b.addrCh)
	}
	if b.w != nil {
		b.w.Close()
	}
	return nil
}
							
								
								
									
										629
									
								
								vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/grpclb.pb.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										629
									
								
								vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/grpclb.pb.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,629 @@ | ||||
| // Code generated by protoc-gen-go. | ||||
| // source: grpclb.proto | ||||
| // DO NOT EDIT! | ||||
|  | ||||
| /* | ||||
| Package grpc_lb_v1 is a generated protocol buffer package. | ||||
|  | ||||
| It is generated from these files: | ||||
| 	grpclb.proto | ||||
|  | ||||
| It has these top-level messages: | ||||
| 	Duration | ||||
| 	Timestamp | ||||
| 	LoadBalanceRequest | ||||
| 	InitialLoadBalanceRequest | ||||
| 	ClientStats | ||||
| 	LoadBalanceResponse | ||||
| 	InitialLoadBalanceResponse | ||||
| 	ServerList | ||||
| 	Server | ||||
| */ | ||||
| package grpc_lb_v1 | ||||
|  | ||||
| import proto "github.com/golang/protobuf/proto" | ||||
| import fmt "fmt" | ||||
| import math "math" | ||||
|  | ||||
| // Reference imports to suppress errors if they are not otherwise used. | ||||
| var _ = proto.Marshal | ||||
| var _ = fmt.Errorf | ||||
| var _ = math.Inf | ||||
|  | ||||
| // This is a compile-time assertion to ensure that this generated file | ||||
| // is compatible with the proto package it is being compiled against. | ||||
| // A compilation error at this line likely means your copy of the | ||||
| // proto package needs to be updated. | ||||
| const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package | ||||
|  | ||||
// Duration represents a signed span of time as seconds plus nanoseconds
// of the same sign (same shape as google.protobuf.Duration).
type Duration struct {
	// Signed seconds of the span of time. Must be from -315,576,000,000
	// to +315,576,000,000 inclusive.
	Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"`
	// Signed fractions of a second at nanosecond resolution of the span
	// of time. Durations less than one second are represented with a 0
	// `seconds` field and a positive or negative `nanos` field. For durations
	// of one second or more, a non-zero value for the `nanos` field must be
	// of the same sign as the `seconds` field. Must be from -999,999,999
	// to +999,999,999 inclusive.
	Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"`
}

// proto.Message boilerplate emitted by protoc-gen-go.
func (m *Duration) Reset()                    { *m = Duration{} }
func (m *Duration) String() string            { return proto.CompactTextString(m) }
func (*Duration) ProtoMessage()               {}
func (*Duration) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }

// GetSeconds returns the Seconds field; nil-safe, returning 0 for a nil receiver.
func (m *Duration) GetSeconds() int64 {
	if m != nil {
		return m.Seconds
	}
	return 0
}

// GetNanos returns the Nanos field; nil-safe, returning 0 for a nil receiver.
func (m *Duration) GetNanos() int32 {
	if m != nil {
		return m.Nanos
	}
	return 0
}
|  | ||||
// Timestamp represents a point in time as seconds since the Unix epoch
// plus non-negative nanoseconds (same shape as google.protobuf.Timestamp).
type Timestamp struct {
	// Represents seconds of UTC time since Unix epoch
	// 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
	// 9999-12-31T23:59:59Z inclusive.
	Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"`
	// Non-negative fractions of a second at nanosecond resolution. Negative
	// second values with fractions must still have non-negative nanos values
	// that count forward in time. Must be from 0 to 999,999,999
	// inclusive.
	Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"`
}

// proto.Message boilerplate emitted by protoc-gen-go.
func (m *Timestamp) Reset()                    { *m = Timestamp{} }
func (m *Timestamp) String() string            { return proto.CompactTextString(m) }
func (*Timestamp) ProtoMessage()               {}
func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }

// GetSeconds returns the Seconds field; nil-safe, returning 0 for a nil receiver.
func (m *Timestamp) GetSeconds() int64 {
	if m != nil {
		return m.Seconds
	}
	return 0
}

// GetNanos returns the Nanos field; nil-safe, returning 0 for a nil receiver.
func (m *Timestamp) GetNanos() int32 {
	if m != nil {
		return m.Nanos
	}
	return 0
}
|  | ||||
// LoadBalanceRequest is a client-to-balancer message carrying exactly one
// of an initial handshake request or a client-stats report (a proto oneof).
type LoadBalanceRequest struct {
	// Types that are valid to be assigned to LoadBalanceRequestType:
	//	*LoadBalanceRequest_InitialRequest
	//	*LoadBalanceRequest_ClientStats
	LoadBalanceRequestType isLoadBalanceRequest_LoadBalanceRequestType `protobuf_oneof:"load_balance_request_type"`
}

// proto.Message boilerplate emitted by protoc-gen-go.
func (m *LoadBalanceRequest) Reset()                    { *m = LoadBalanceRequest{} }
func (m *LoadBalanceRequest) String() string            { return proto.CompactTextString(m) }
func (*LoadBalanceRequest) ProtoMessage()               {}
func (*LoadBalanceRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }

// isLoadBalanceRequest_LoadBalanceRequestType is the interface satisfied
// by the oneof wrapper types below.
type isLoadBalanceRequest_LoadBalanceRequestType interface {
	isLoadBalanceRequest_LoadBalanceRequestType()
}

// LoadBalanceRequest_InitialRequest wraps the initial_request oneof case.
type LoadBalanceRequest_InitialRequest struct {
	InitialRequest *InitialLoadBalanceRequest `protobuf:"bytes,1,opt,name=initial_request,json=initialRequest,oneof"`
}
// LoadBalanceRequest_ClientStats wraps the client_stats oneof case.
type LoadBalanceRequest_ClientStats struct {
	ClientStats *ClientStats `protobuf:"bytes,2,opt,name=client_stats,json=clientStats,oneof"`
}

func (*LoadBalanceRequest_InitialRequest) isLoadBalanceRequest_LoadBalanceRequestType() {}
func (*LoadBalanceRequest_ClientStats) isLoadBalanceRequest_LoadBalanceRequestType()    {}

// GetLoadBalanceRequestType returns the populated oneof value (nil-safe).
func (m *LoadBalanceRequest) GetLoadBalanceRequestType() isLoadBalanceRequest_LoadBalanceRequestType {
	if m != nil {
		return m.LoadBalanceRequestType
	}
	return nil
}

// GetInitialRequest returns the initial request if that oneof case is set,
// nil otherwise.
func (m *LoadBalanceRequest) GetInitialRequest() *InitialLoadBalanceRequest {
	if x, ok := m.GetLoadBalanceRequestType().(*LoadBalanceRequest_InitialRequest); ok {
		return x.InitialRequest
	}
	return nil
}

// GetClientStats returns the client stats if that oneof case is set, nil
// otherwise.
func (m *LoadBalanceRequest) GetClientStats() *ClientStats {
	if x, ok := m.GetLoadBalanceRequestType().(*LoadBalanceRequest_ClientStats); ok {
		return x.ClientStats
	}
	return nil
}
|  | ||||
// XXX_OneofFuncs is for the internal use of the proto package.
// It registers the marshal/unmarshal/size helpers for the
// load_balance_request_type oneof.
func (*LoadBalanceRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
	return _LoadBalanceRequest_OneofMarshaler, _LoadBalanceRequest_OneofUnmarshaler, _LoadBalanceRequest_OneofSizer, []interface{}{
		(*LoadBalanceRequest_InitialRequest)(nil),
		(*LoadBalanceRequest_ClientStats)(nil),
	}
}

// _LoadBalanceRequest_OneofMarshaler encodes whichever oneof case is set
// as a length-delimited field with its corresponding tag.
func _LoadBalanceRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
	m := msg.(*LoadBalanceRequest)
	// load_balance_request_type
	switch x := m.LoadBalanceRequestType.(type) {
	case *LoadBalanceRequest_InitialRequest:
		b.EncodeVarint(1<<3 | proto.WireBytes)
		if err := b.EncodeMessage(x.InitialRequest); err != nil {
			return err
		}
	case *LoadBalanceRequest_ClientStats:
		b.EncodeVarint(2<<3 | proto.WireBytes)
		if err := b.EncodeMessage(x.ClientStats); err != nil {
			return err
		}
	case nil:
	default:
		return fmt.Errorf("LoadBalanceRequest.LoadBalanceRequestType has unexpected type %T", x)
	}
	return nil
}

// _LoadBalanceRequest_OneofUnmarshaler decodes a oneof field by tag and
// stores the wrapper in LoadBalanceRequestType; it reports whether the
// tag belonged to this oneof.
func _LoadBalanceRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
	m := msg.(*LoadBalanceRequest)
	switch tag {
	case 1: // load_balance_request_type.initial_request
		if wire != proto.WireBytes {
			return true, proto.ErrInternalBadWireType
		}
		msg := new(InitialLoadBalanceRequest)
		err := b.DecodeMessage(msg)
		m.LoadBalanceRequestType = &LoadBalanceRequest_InitialRequest{msg}
		return true, err
	case 2: // load_balance_request_type.client_stats
		if wire != proto.WireBytes {
			return true, proto.ErrInternalBadWireType
		}
		msg := new(ClientStats)
		err := b.DecodeMessage(msg)
		m.LoadBalanceRequestType = &LoadBalanceRequest_ClientStats{msg}
		return true, err
	default:
		return false, nil
	}
}

// _LoadBalanceRequest_OneofSizer returns the encoded size (tag + length
// prefix + payload) of whichever oneof case is set.
func _LoadBalanceRequest_OneofSizer(msg proto.Message) (n int) {
	m := msg.(*LoadBalanceRequest)
	// load_balance_request_type
	switch x := m.LoadBalanceRequestType.(type) {
	case *LoadBalanceRequest_InitialRequest:
		s := proto.Size(x.InitialRequest)
		n += proto.SizeVarint(1<<3 | proto.WireBytes)
		n += proto.SizeVarint(uint64(s))
		n += s
	case *LoadBalanceRequest_ClientStats:
		s := proto.Size(x.ClientStats)
		n += proto.SizeVarint(2<<3 | proto.WireBytes)
		n += proto.SizeVarint(uint64(s))
		n += s
	case nil:
	default:
		panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
	}
	return n
}
|  | ||||
// InitialLoadBalanceRequest is the first message the client sends on a
// BalanceLoad stream, identifying the load-balanced service.
type InitialLoadBalanceRequest struct {
	// Name of load balanced service (IE, balancer.service.com)
	// length should be less than 256 bytes.
	Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
}

// proto.Message boilerplate emitted by protoc-gen-go.
func (m *InitialLoadBalanceRequest) Reset()                    { *m = InitialLoadBalanceRequest{} }
func (m *InitialLoadBalanceRequest) String() string            { return proto.CompactTextString(m) }
func (*InitialLoadBalanceRequest) ProtoMessage()               {}
func (*InitialLoadBalanceRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }

// GetName returns the Name field; nil-safe, returning "" for a nil receiver.
func (m *InitialLoadBalanceRequest) GetName() string {
	if m != nil {
		return m.Name
	}
	return ""
}
|  | ||||
// Contains client level statistics that are useful to load balancing. Each
// count except the timestamp should be reset to zero after reporting the stats.
type ClientStats struct {
	// The timestamp of generating the report.
	Timestamp *Timestamp `protobuf:"bytes,1,opt,name=timestamp" json:"timestamp,omitempty"`
	// The total number of RPCs that started.
	NumCallsStarted int64 `protobuf:"varint,2,opt,name=num_calls_started,json=numCallsStarted" json:"num_calls_started,omitempty"`
	// The total number of RPCs that finished.
	NumCallsFinished int64 `protobuf:"varint,3,opt,name=num_calls_finished,json=numCallsFinished" json:"num_calls_finished,omitempty"`
	// The total number of RPCs that were dropped by the client because of rate
	// limiting.
	NumCallsFinishedWithDropForRateLimiting int64 `protobuf:"varint,4,opt,name=num_calls_finished_with_drop_for_rate_limiting,json=numCallsFinishedWithDropForRateLimiting" json:"num_calls_finished_with_drop_for_rate_limiting,omitempty"`
	// The total number of RPCs that were dropped by the client because of load
	// balancing.
	NumCallsFinishedWithDropForLoadBalancing int64 `protobuf:"varint,5,opt,name=num_calls_finished_with_drop_for_load_balancing,json=numCallsFinishedWithDropForLoadBalancing" json:"num_calls_finished_with_drop_for_load_balancing,omitempty"`
	// The total number of RPCs that failed to reach a server except dropped RPCs.
	NumCallsFinishedWithClientFailedToSend int64 `protobuf:"varint,6,opt,name=num_calls_finished_with_client_failed_to_send,json=numCallsFinishedWithClientFailedToSend" json:"num_calls_finished_with_client_failed_to_send,omitempty"`
	// The total number of RPCs that finished and are known to have been received
	// by a server.
	NumCallsFinishedKnownReceived int64 `protobuf:"varint,7,opt,name=num_calls_finished_known_received,json=numCallsFinishedKnownReceived" json:"num_calls_finished_known_received,omitempty"`
}

// proto.Message boilerplate emitted by protoc-gen-go.
func (m *ClientStats) Reset()                    { *m = ClientStats{} }
func (m *ClientStats) String() string            { return proto.CompactTextString(m) }
func (*ClientStats) ProtoMessage()               {}
func (*ClientStats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }

// The accessors below are nil-safe: each returns the field value, or the
// zero value when the receiver is nil.
func (m *ClientStats) GetTimestamp() *Timestamp {
	if m != nil {
		return m.Timestamp
	}
	return nil
}

func (m *ClientStats) GetNumCallsStarted() int64 {
	if m != nil {
		return m.NumCallsStarted
	}
	return 0
}

func (m *ClientStats) GetNumCallsFinished() int64 {
	if m != nil {
		return m.NumCallsFinished
	}
	return 0
}

func (m *ClientStats) GetNumCallsFinishedWithDropForRateLimiting() int64 {
	if m != nil {
		return m.NumCallsFinishedWithDropForRateLimiting
	}
	return 0
}

func (m *ClientStats) GetNumCallsFinishedWithDropForLoadBalancing() int64 {
	if m != nil {
		return m.NumCallsFinishedWithDropForLoadBalancing
	}
	return 0
}

func (m *ClientStats) GetNumCallsFinishedWithClientFailedToSend() int64 {
	if m != nil {
		return m.NumCallsFinishedWithClientFailedToSend
	}
	return 0
}

func (m *ClientStats) GetNumCallsFinishedKnownReceived() int64 {
	if m != nil {
		return m.NumCallsFinishedKnownReceived
	}
	return 0
}
|  | ||||
// LoadBalanceResponse is a balancer-to-client message carrying exactly one
// of an initial handshake response or a server list (a proto oneof).
type LoadBalanceResponse struct {
	// Types that are valid to be assigned to LoadBalanceResponseType:
	//	*LoadBalanceResponse_InitialResponse
	//	*LoadBalanceResponse_ServerList
	LoadBalanceResponseType isLoadBalanceResponse_LoadBalanceResponseType `protobuf_oneof:"load_balance_response_type"`
}

// proto.Message boilerplate emitted by protoc-gen-go.
func (m *LoadBalanceResponse) Reset()                    { *m = LoadBalanceResponse{} }
func (m *LoadBalanceResponse) String() string            { return proto.CompactTextString(m) }
func (*LoadBalanceResponse) ProtoMessage()               {}
func (*LoadBalanceResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }

// isLoadBalanceResponse_LoadBalanceResponseType is the interface satisfied
// by the oneof wrapper types below.
type isLoadBalanceResponse_LoadBalanceResponseType interface {
	isLoadBalanceResponse_LoadBalanceResponseType()
}

// LoadBalanceResponse_InitialResponse wraps the initial_response oneof case.
type LoadBalanceResponse_InitialResponse struct {
	InitialResponse *InitialLoadBalanceResponse `protobuf:"bytes,1,opt,name=initial_response,json=initialResponse,oneof"`
}
// LoadBalanceResponse_ServerList wraps the server_list oneof case.
type LoadBalanceResponse_ServerList struct {
	ServerList *ServerList `protobuf:"bytes,2,opt,name=server_list,json=serverList,oneof"`
}

func (*LoadBalanceResponse_InitialResponse) isLoadBalanceResponse_LoadBalanceResponseType() {}
func (*LoadBalanceResponse_ServerList) isLoadBalanceResponse_LoadBalanceResponseType()      {}

// GetLoadBalanceResponseType returns the populated oneof value (nil-safe).
func (m *LoadBalanceResponse) GetLoadBalanceResponseType() isLoadBalanceResponse_LoadBalanceResponseType {
	if m != nil {
		return m.LoadBalanceResponseType
	}
	return nil
}

// GetInitialResponse returns the initial response if that oneof case is
// set, nil otherwise.
func (m *LoadBalanceResponse) GetInitialResponse() *InitialLoadBalanceResponse {
	if x, ok := m.GetLoadBalanceResponseType().(*LoadBalanceResponse_InitialResponse); ok {
		return x.InitialResponse
	}
	return nil
}

// GetServerList returns the server list if that oneof case is set, nil
// otherwise.
func (m *LoadBalanceResponse) GetServerList() *ServerList {
	if x, ok := m.GetLoadBalanceResponseType().(*LoadBalanceResponse_ServerList); ok {
		return x.ServerList
	}
	return nil
}
|  | ||||
// XXX_OneofFuncs is for the internal use of the proto package. It returns
// the oneof marshaler, unmarshaler and sizer for this message, together with
// the concrete wrapper types the oneof field may hold.
func (*LoadBalanceResponse) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
	return _LoadBalanceResponse_OneofMarshaler, _LoadBalanceResponse_OneofUnmarshaler, _LoadBalanceResponse_OneofSizer, []interface{}{
		(*LoadBalanceResponse_InitialResponse)(nil),
		(*LoadBalanceResponse_ServerList)(nil),
	}
}
|  | ||||
// _LoadBalanceResponse_OneofMarshaler encodes whichever oneof variant is set
// as a length-delimited field: tag 1 for initial_response, tag 2 for
// server_list. An unset oneof (nil) writes nothing; any other dynamic type
// is an error.
func _LoadBalanceResponse_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
	m := msg.(*LoadBalanceResponse)
	// load_balance_response_type
	switch x := m.LoadBalanceResponseType.(type) {
	case *LoadBalanceResponse_InitialResponse:
		// Field key = (field number << 3) | wire type.
		b.EncodeVarint(1<<3 | proto.WireBytes)
		if err := b.EncodeMessage(x.InitialResponse); err != nil {
			return err
		}
	case *LoadBalanceResponse_ServerList:
		b.EncodeVarint(2<<3 | proto.WireBytes)
		if err := b.EncodeMessage(x.ServerList); err != nil {
			return err
		}
	case nil:
	default:
		return fmt.Errorf("LoadBalanceResponse.LoadBalanceResponseType has unexpected type %T", x)
	}
	return nil
}
|  | ||||
// _LoadBalanceResponse_OneofUnmarshaler decodes a single field of the oneof
// from the buffer. It reports whether the tag belonged to this oneof: tags 1
// (initial_response) and 2 (server_list) are handled here; any other tag is
// left to the caller.
func _LoadBalanceResponse_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
	m := msg.(*LoadBalanceResponse)
	switch tag {
	case 1: // load_balance_response_type.initial_response
		if wire != proto.WireBytes {
			// Embedded messages must be length-delimited on the wire.
			return true, proto.ErrInternalBadWireType
		}
		msg := new(InitialLoadBalanceResponse)
		err := b.DecodeMessage(msg)
		m.LoadBalanceResponseType = &LoadBalanceResponse_InitialResponse{msg}
		return true, err
	case 2: // load_balance_response_type.server_list
		if wire != proto.WireBytes {
			return true, proto.ErrInternalBadWireType
		}
		msg := new(ServerList)
		err := b.DecodeMessage(msg)
		m.LoadBalanceResponseType = &LoadBalanceResponse_ServerList{msg}
		return true, err
	default:
		return false, nil
	}
}
|  | ||||
// _LoadBalanceResponse_OneofSizer returns the encoded size in bytes of
// whichever oneof variant is set: the field-key varint, the length varint,
// and the payload itself. An unset oneof contributes zero.
func _LoadBalanceResponse_OneofSizer(msg proto.Message) (n int) {
	m := msg.(*LoadBalanceResponse)
	// load_balance_response_type
	switch x := m.LoadBalanceResponseType.(type) {
	case *LoadBalanceResponse_InitialResponse:
		s := proto.Size(x.InitialResponse)
		n += proto.SizeVarint(1<<3 | proto.WireBytes)
		n += proto.SizeVarint(uint64(s))
		n += s
	case *LoadBalanceResponse_ServerList:
		s := proto.Size(x.ServerList)
		n += proto.SizeVarint(2<<3 | proto.WireBytes)
		n += proto.SizeVarint(uint64(s))
		n += s
	case nil:
	default:
		// A wrapper type outside the registered set is a programmer error.
		panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
	}
	return n
}
|  | ||||
// InitialLoadBalanceResponse is the grpc.lb.v1.InitialLoadBalanceResponse
// message: the balancer's first reply, carrying an optional delegate address
// and the client-stats reporting interval.
type InitialLoadBalanceResponse struct {
	// This is an application layer redirect that indicates the client should use
	// the specified server for load balancing. When this field is non-empty in
	// the response, the client should open a separate connection to the
	// load_balancer_delegate and call the BalanceLoad method. Its length should
	// be less than 64 bytes.
	LoadBalancerDelegate string `protobuf:"bytes,1,opt,name=load_balancer_delegate,json=loadBalancerDelegate" json:"load_balancer_delegate,omitempty"`
	// This interval defines how often the client should send the client stats
	// to the load balancer. Stats should only be reported when the duration is
	// positive.
	ClientStatsReportInterval *Duration `protobuf:"bytes,2,opt,name=client_stats_report_interval,json=clientStatsReportInterval" json:"client_stats_report_interval,omitempty"`
}
|  | ||||
// Reset restores the message to its zero value.
func (m *InitialLoadBalanceResponse) Reset()                    { *m = InitialLoadBalanceResponse{} }
// String renders the message in the proto compact text format.
func (m *InitialLoadBalanceResponse) String() string            { return proto.CompactTextString(m) }
// ProtoMessage marks the type as a protobuf message.
func (*InitialLoadBalanceResponse) ProtoMessage()               {}
// Descriptor returns the raw file descriptor and this message's index path within it.
func (*InitialLoadBalanceResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
|  | ||||
| func (m *InitialLoadBalanceResponse) GetLoadBalancerDelegate() string { | ||||
| 	if m != nil { | ||||
| 		return m.LoadBalancerDelegate | ||||
| 	} | ||||
| 	return "" | ||||
| } | ||||
|  | ||||
| func (m *InitialLoadBalanceResponse) GetClientStatsReportInterval() *Duration { | ||||
| 	if m != nil { | ||||
| 		return m.ClientStatsReportInterval | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
// ServerList is the grpc.lb.v1.ServerList message: the balancer-selected
// servers plus an optional validity interval for the list.
type ServerList struct {
	// Contains a list of servers selected by the load balancer. The list will
	// be updated when server resolutions change or as needed to balance load
	// across more servers. The client should consume the server list in order
	// unless instructed otherwise via the client_config.
	Servers []*Server `protobuf:"bytes,1,rep,name=servers" json:"servers,omitempty"`
	// Indicates the amount of time that the client should consider this server
	// list as valid. It may be considered stale after waiting this interval of
	// time after receiving the list. If the interval is not positive, the
	// client can assume the list is valid until the next list is received.
	ExpirationInterval *Duration `protobuf:"bytes,3,opt,name=expiration_interval,json=expirationInterval" json:"expiration_interval,omitempty"`
}
|  | ||||
// Reset restores the message to its zero value.
func (m *ServerList) Reset()                    { *m = ServerList{} }
// String renders the message in the proto compact text format.
func (m *ServerList) String() string            { return proto.CompactTextString(m) }
// ProtoMessage marks the type as a protobuf message.
func (*ServerList) ProtoMessage()               {}
// Descriptor returns the raw file descriptor and this message's index path within it.
func (*ServerList) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
|  | ||||
| func (m *ServerList) GetServers() []*Server { | ||||
| 	if m != nil { | ||||
| 		return m.Servers | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func (m *ServerList) GetExpirationInterval() *Duration { | ||||
| 	if m != nil { | ||||
| 		return m.ExpirationInterval | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
// Server contains server information. When none of the [drop_for_*] fields
// are true, use the other fields. When drop_for_rate_limiting is true,
// ignore all other fields. Use drop_for_load_balancing only when it is true
// and drop_for_rate_limiting is false.
type Server struct {
	// A resolved address for the server, serialized in network-byte-order. It may
	// either be an IPv4 or IPv6 address.
	IpAddress []byte `protobuf:"bytes,1,opt,name=ip_address,json=ipAddress,proto3" json:"ip_address,omitempty"`
	// A resolved port number for the server.
	Port int32 `protobuf:"varint,2,opt,name=port" json:"port,omitempty"`
	// An opaque but printable token given to the frontend for each pick. All
	// frontend requests for that pick must include the token in its initial
	// metadata. The token is used by the backend to verify the request and to
	// allow the backend to report load to the gRPC LB system.
	//
	// Its length is variable but less than 50 bytes.
	LoadBalanceToken string `protobuf:"bytes,3,opt,name=load_balance_token,json=loadBalanceToken" json:"load_balance_token,omitempty"`
	// Indicates whether this particular request should be dropped by the client
	// for rate limiting.
	DropForRateLimiting bool `protobuf:"varint,4,opt,name=drop_for_rate_limiting,json=dropForRateLimiting" json:"drop_for_rate_limiting,omitempty"`
	// Indicates whether this particular request should be dropped by the client
	// for load balancing.
	DropForLoadBalancing bool `protobuf:"varint,5,opt,name=drop_for_load_balancing,json=dropForLoadBalancing" json:"drop_for_load_balancing,omitempty"`
}
|  | ||||
// Reset restores the message to its zero value.
func (m *Server) Reset()                    { *m = Server{} }
// String renders the message in the proto compact text format.
func (m *Server) String() string            { return proto.CompactTextString(m) }
// ProtoMessage marks the type as a protobuf message.
func (*Server) ProtoMessage()               {}
// Descriptor returns the raw file descriptor and this message's index path within it.
func (*Server) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} }
|  | ||||
| func (m *Server) GetIpAddress() []byte { | ||||
| 	if m != nil { | ||||
| 		return m.IpAddress | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func (m *Server) GetPort() int32 { | ||||
| 	if m != nil { | ||||
| 		return m.Port | ||||
| 	} | ||||
| 	return 0 | ||||
| } | ||||
|  | ||||
| func (m *Server) GetLoadBalanceToken() string { | ||||
| 	if m != nil { | ||||
| 		return m.LoadBalanceToken | ||||
| 	} | ||||
| 	return "" | ||||
| } | ||||
|  | ||||
| func (m *Server) GetDropForRateLimiting() bool { | ||||
| 	if m != nil { | ||||
| 		return m.DropForRateLimiting | ||||
| 	} | ||||
| 	return false | ||||
| } | ||||
|  | ||||
| func (m *Server) GetDropForLoadBalancing() bool { | ||||
| 	if m != nil { | ||||
| 		return m.DropForLoadBalancing | ||||
| 	} | ||||
| 	return false | ||||
| } | ||||
|  | ||||
// init registers every message type in this file with the proto package
// under its fully-qualified grpc.lb.v1 name.
func init() {
	proto.RegisterType((*Duration)(nil), "grpc.lb.v1.Duration")
	proto.RegisterType((*Timestamp)(nil), "grpc.lb.v1.Timestamp")
	proto.RegisterType((*LoadBalanceRequest)(nil), "grpc.lb.v1.LoadBalanceRequest")
	proto.RegisterType((*InitialLoadBalanceRequest)(nil), "grpc.lb.v1.InitialLoadBalanceRequest")
	proto.RegisterType((*ClientStats)(nil), "grpc.lb.v1.ClientStats")
	proto.RegisterType((*LoadBalanceResponse)(nil), "grpc.lb.v1.LoadBalanceResponse")
	proto.RegisterType((*InitialLoadBalanceResponse)(nil), "grpc.lb.v1.InitialLoadBalanceResponse")
	proto.RegisterType((*ServerList)(nil), "grpc.lb.v1.ServerList")
	proto.RegisterType((*Server)(nil), "grpc.lb.v1.Server")
}
|  | ||||
| func init() { proto.RegisterFile("grpclb.proto", fileDescriptor0) } | ||||
|  | ||||
// fileDescriptor0 holds the serialized FileDescriptorProto for grpclb.proto.
// It is opaque generated data; do not edit by hand.
var fileDescriptor0 = []byte{
	// 733 bytes of a gzipped FileDescriptorProto
	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x55, 0xdd, 0x4e, 0x1b, 0x39,
	0x14, 0x66, 0x36, 0xfc, 0xe5, 0x24, 0x5a, 0x58, 0x93, 0x85, 0xc0, 0xc2, 0x2e, 0x1b, 0xa9, 0x34,
	0xaa, 0x68, 0x68, 0x43, 0x7b, 0xd1, 0x9f, 0x9b, 0x02, 0x45, 0x41, 0xe5, 0xa2, 0x72, 0xa8, 0x7a,
	0x55, 0x59, 0x4e, 0xc6, 0x80, 0xc5, 0xc4, 0x9e, 0xda, 0x4e, 0x68, 0x2f, 0x7b, 0xd9, 0x47, 0xe9,
	0x63, 0x54, 0x7d, 0x86, 0xbe, 0x4f, 0x65, 0x7b, 0x26, 0x33, 0x90, 0x1f, 0xd4, 0xbb, 0xf1, 0xf1,
	0x77, 0xbe, 0xf3, 0xf9, 0xd8, 0xdf, 0x19, 0x28, 0x5f, 0xa8, 0xb8, 0x1b, 0x75, 0x1a, 0xb1, 0x92,
	0x46, 0x22, 0xb0, 0xab, 0x46, 0xd4, 0x69, 0x0c, 0x1e, 0xd7, 0x9e, 0xc3, 0xe2, 0x51, 0x5f, 0x51,
	0xc3, 0xa5, 0x40, 0x55, 0x58, 0xd0, 0xac, 0x2b, 0x45, 0xa8, 0xab, 0xc1, 0x76, 0x50, 0x2f, 0xe0,
	0x74, 0x89, 0x2a, 0x30, 0x27, 0xa8, 0x90, 0xba, 0xfa, 0xc7, 0x76, 0x50, 0x9f, 0xc3, 0x7e, 0x51,
	0x7b, 0x01, 0xc5, 0x33, 0xde, 0x63, 0xda, 0xd0, 0x5e, 0xfc, 0xdb, 0xc9, 0xdf, 0x03, 0x40, 0xa7,
	0x92, 0x86, 0x07, 0x34, 0xa2, 0xa2, 0xcb, 0x30, 0xfb, 0xd8, 0x67, 0xda, 0xa0, 0xb7, 0xb0, 0xc4,
	0x05, 0x37, 0x9c, 0x46, 0x44, 0xf9, 0x90, 0xa3, 0x2b, 0x35, 0xef, 0x35, 0x32, 0xd5, 0x8d, 0x13,
	0x0f, 0x19, 0xcd, 0x6f, 0xcd, 0xe0, 0x3f, 0x93, 0xfc, 0x94, 0xf1, 0x25, 0x94, 0xbb, 0x11, 0x67,
	0xc2, 0x10, 0x6d, 0xa8, 0xf1, 0x2a, 0x4a, 0xcd, 0xb5, 0x3c, 0xdd, 0xa1, 0xdb, 0x6f, 0xdb, 0xed,
	0xd6, 0x0c, 0x2e, 0x75, 0xb3, 0xe5, 0xc1, 0x3f, 0xb0, 0x1e, 0x49, 0x1a, 0x92, 0x8e, 0x2f, 0x93,
	0x8a, 0x22, 0xe6, 0x73, 0xcc, 0x6a, 0x7b, 0xb0, 0x3e, 0x51, 0x09, 0x42, 0x30, 0x2b, 0x68, 0x8f,
	0x39, 0xf9, 0x45, 0xec, 0xbe, 0x6b, 0x5f, 0x67, 0xa1, 0x94, 0x2b, 0x86, 0xf6, 0xa1, 0x68, 0xd2,
	0x0e, 0x26, 0xe7, 0xfc, 0x3b, 0x2f, 0x6c, 0xd8, 0x5e, 0x9c, 0xe1, 0xd0, 0x03, 0xf8, 0x4b, 0xf4,
	0x7b, 0xa4, 0x4b, 0xa3, 0x48, 0xdb, 0x33, 0x29, 0xc3, 0x42, 0x77, 0xaa, 0x02, 0x5e, 0x12, 0xfd,
	0xde, 0xa1, 0x8d, 0xb7, 0x7d, 0x18, 0xed, 0x02, 0xca, 0xb0, 0xe7, 0x5c, 0x70, 0x7d, 0xc9, 0xc2,
	0x6a, 0xc1, 0x81, 0x97, 0x53, 0xf0, 0x71, 0x12, 0x47, 0x04, 0x1a, 0xa3, 0x68, 0x72, 0xcd, 0xcd,
	0x25, 0x09, 0x95, 0x8c, 0xc9, 0xb9, 0x54, 0x44, 0x51, 0xc3, 0x48, 0xc4, 0x7b, 0xdc, 0x70, 0x71,
	0x51, 0x9d, 0x75, 0x4c, 0xf7, 0x6f, 0x33, 0xbd, 0xe7, 0xe6, 0xf2, 0x48, 0xc9, 0xf8, 0x58, 0x2a,
	0x4c, 0x0d, 0x3b, 0x4d, 0xe0, 0x88, 0xc2, 0xde, 0x9d, 0x05, 0x72, 0xed, 0xb6, 0x15, 0xe6, 0x5c,
	0x85, 0xfa, 0x94, 0x0a, 0x59, 0xef, 0x6d, 0x89, 0x0f, 0xf0, 0x70, 0x52, 0x89, 0xe4, 0x19, 0x9c,
	0x53, 0x1e, 0xb1, 0x90, 0x18, 0x49, 0x34, 0x13, 0x61, 0x75, 0xde, 0x15, 0xd8, 0x19, 0x57, 0xc0,
	0x5f, 0xd5, 0xb1, 0xc3, 0x9f, 0xc9, 0x36, 0x13, 0x21, 0x6a, 0xc1, 0xff, 0x63, 0xe8, 0xaf, 0x84,
	0xbc, 0x16, 0x44, 0xb1, 0x2e, 0xe3, 0x03, 0x16, 0x56, 0x17, 0x1c, 0xe5, 0xd6, 0x6d, 0xca, 0x37,
	0x16, 0x85, 0x13, 0x50, 0xed, 0x47, 0x00, 0x2b, 0x37, 0x9e, 0x8d, 0x8e, 0xa5, 0xd0, 0x0c, 0xb5,
	0x61, 0x39, 0x73, 0x80, 0x8f, 0x25, 0x4f, 0x63, 0xe7, 0x2e, 0x0b, 0x78, 0x74, 0x6b, 0x06, 0x2f,
	0x0d, 0x3d, 0x90, 0x90, 0x3e, 0x83, 0x92, 0x66, 0x6a, 0xc0, 0x14, 0x89, 0xb8, 0x36, 0x89, 0x07,
	0x56, 0xf3, 0x7c, 0x6d, 0xb7, 0x7d, 0xca, 0x9d, 0x87, 0x40, 0x0f, 0x57, 0x07, 0x9b, 0xb0, 0x71,
	0xcb, 0x01, 0x9e, 0xd3, 0x5b, 0xe0, 0x5b, 0x00, 0x1b, 0x93, 0xa5, 0xa0, 0x27, 0xb0, 0x9a, 0x4f,
	0x56, 0x24, 0x64, 0x11, 0xbb, 0xa0, 0x26, 0xb5, 0x45, 0x25, 0xca, 0x92, 0xd4, 0x51, 0xb2, 0x87,
	0xde, 0xc1, 0x66, 0xde, 0xb2, 0x44, 0xb1, 0x58, 0x2a, 0x43, 0xb8, 0x30, 0x4c, 0x0d, 0x68, 0x94,
	0xc8, 0xaf, 0xe4, 0xe5, 0xa7, 0x43, 0x0c, 0xaf, 0xe7, 0xdc, 0x8b, 0x5d, 0xde, 0x49, 0x92, 0x56,
	0xfb, 0x12, 0x00, 0x64, 0xc7, 0x44, 0xbb, 0x76, 0x62, 0xd9, 0x95, 0x9d, 0x58, 0x85, 0x7a, 0xa9,
	0x89, 0x46, 0xfb, 0x81, 0x53, 0x08, 0x7a, 0x0d, 0x2b, 0xec, 0x53, 0xcc, 0x7d, 0x95, 0x4c, 0x4a,
	0x61, 0x8a, 0x14, 0x94, 0x25, 0x0c, 0x35, 0xfc, 0x0c, 0x60, 0xde, 0x53, 0xa3, 0x2d, 0x00, 0x1e,
	0x13, 0x1a, 0x86, 0x8a, 0x69, 0x3f, 0x34, 0xcb, 0xb8, 0xc8, 0xe3, 0x57, 0x3e, 0x60, 0xe7, 0x87,
	0x55, 0x9f, 0x4c, 0x4d, 0xf7, 0x6d, 0xed, 0x7c, 0xe3, 0x2e, 0x8c, 0xbc, 0x62, 0xc2, 0x69, 0x28,
	0xe2, 0xe5, 0x5c, 0x2b, 0xcf, 0x6c, 0x1c, 0xed, 0xc3, 0xea, 0x14, 0xdb, 0x2e, 0xe2, 0x95, 0x70,
	0x8c, 0x45, 0x9f, 0xc2, 0xda, 0x34, 0x2b, 0x2e, 0xe2, 0x4a, 0x38, 0xc6, 0x76, 0xcd, 0x0e, 0x94,
	0x73, 0xf7, 0xaf, 0x10, 0x86, 0x52, 0xf2, 0x6d, 0xc3, 0xe8, 0xdf, 0x7c, 0x83, 0x46, 0x87, 0xe5,
	0xc6, 0x7f, 0x13, 0xf7, 0xfd, 0x43, 0xaa, 0x07, 0x8f, 0x82, 0xce, 0xbc, 0xfb, 0x7d, 0xed, 0xff,
	0x0a, 0x00, 0x00, 0xff, 0xff, 0x64, 0xbf, 0xda, 0x5e, 0xce, 0x06, 0x00, 0x00,
}
Some files were not shown because too many files have changed in this diff Show More
		Reference in New Issue
	
	Block a user
	 Stephen J Day
					Stephen J Day