Update kubernetes to 0caa20c65f147e15f5545862510eb7e81c42b0a3.

Signed-off-by: Lantao Liu <lantaol@google.com>

commit ae85d0ee81 (parent 047df7aca6)
@@ -3,4 +3,4 @@ CNI_VERSION=v0.6.0
 CONTAINERD_VERSION=f12ba2407e328c98f8be5eacbb9c510b073dd4c0
 CONTAINERD_REPO=
 CRITOOL_VERSION=ded07bb08aa23492fa0233bb3af8c4629875f286
-KUBERNETES_VERSION=v1.9.0
+KUBERNETES_VERSION=0caa20c65f147e15f5545862510eb7e81c42b0a3
vendor.conf (25 changes)
@@ -22,19 +22,11 @@ github.com/godbus/dbus c7fdd8b5cd55e87b4e1f4e372cdb1db61dd6c66f
 github.com/gogo/protobuf v0.5
 github.com/golang/glog 44145f04b68cf362d9c4df2182967c2275eaefed
 github.com/golang/protobuf 1643683e1b54a9e88ad26d98f81400c8c9d9f4f9
-github.com/google/btree 7d79101e329e5a3adf994758c578dab82b90c017
 github.com/google/gofuzz 44d81051d367757e1c7c6a5a86423ece9afcf63c
-github.com/go-openapi/jsonpointer 46af16f9f7b149af66e5d1bd010e3574dc06de98
-github.com/go-openapi/jsonreference 13c6e3589ad90f49bd3e3bbe2c2cb3d7a4142272
-github.com/go-openapi/spec 6aced65f8501fe1217321abf0749d354824ba2ff
-github.com/go-openapi/swag 1d0bd113de87027671077d3c71eb3ac5d7dbba72
-github.com/gregjones/httpcache 787624de3eb7bd915c329cba748687a3b22666a6
 github.com/hashicorp/errwrap 7554cd9344cec97297fa6649b055a8c98c2a1e55
 github.com/hashicorp/go-multierror ed905158d87462226a13fe39ddf685ea65f1c11f
 github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75
 github.com/json-iterator/go 1.0.4
-github.com/juju/ratelimit 5b9ff866471762aa2ab2dced63c9fb6f53921342
-github.com/mailru/easyjson d5b7844b561a7bc640052f1b935f7b800330d7e0
 github.com/Microsoft/go-winio v0.4.5
 github.com/Microsoft/hcsshim v0.6.7
 github.com/opencontainers/go-digest 21dfd564fd89c944783d00d069f33e3e7123c448
@@ -43,11 +35,8 @@ github.com/opencontainers/runc 9f9c96235cc97674e935002fc3d78361b696a69e
 github.com/opencontainers/runtime-spec v1.0.1
 github.com/opencontainers/runtime-tools 6073aff4ac61897f75895123f7e24135204a404d
 github.com/opencontainers/selinux 4a2974bf1ee960774ffd517717f1f45325af0206
-github.com/peterbourgon/diskv v2.0.1
 github.com/pkg/errors v0.8.0
 github.com/pmezard/go-difflib v1.0.0
-github.com/PuerkitoBio/purell v1.0.0
-github.com/PuerkitoBio/urlesc 5bd2802263f21d8788851d5305584c82a5c75d7e
 github.com/renstrom/dedent 020d11c3b9c0c7a3c2efcc8e5cf5b9ef7bcea21f
 github.com/seccomp/libseccomp-golang 32f571b70023028bd57d9288c20efbcb237f3ce0
 github.com/sirupsen/logrus v1.0.0
@@ -60,14 +49,14 @@ golang.org/x/net 7dcfb8076726a3fdd9353b6b8a1f1b6be6811bd6
 golang.org/x/sync 450f422ab23cf9881c94e2db30cac0eb1b7cf80c
 golang.org/x/sys 314a259e304ff91bd6985da2a7149bbf91237993 https://github.com/golang/sys
 golang.org/x/text 19e51611da83d6be54ddafce4a4af510cb3e9ea4
+golang.org/x/time f51c12702a4d776e4c1fa9b0fabab841babae631
 google.golang.org/genproto d80a6e20e776b0b17a324d0ba1ab50a39c8e8944
 google.golang.org/grpc v1.7.4
 gopkg.in/inf.v0 3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4
 gopkg.in/yaml.v2 53feefa2559fb8dfa8d81baad31be332c97d6c77
-k8s.io/api 9382f5a87a364c195477134986b578d103c7c24d
-k8s.io/apimachinery 23bc0b9defba312e3b5938fa0d0581f3e882f862
-k8s.io/apiserver cbbf2be01950dbf88df68c3ac331e532b3664464
-k8s.io/client-go 87887458218a51f3944b2f4c553eb38173458e97
-k8s.io/kube-openapi b16ebc07f5cad97831f961e4b5a9cc1caed33b7e
-k8s.io/kubernetes v1.9.0
-k8s.io/utils 66423a0293c555337adc04fe2c59748151291de8
+k8s.io/api beab4da9671e79815b7876363175af45aa180eb5
+k8s.io/apimachinery 6212319467788d635606616d5c6d87ded0321d8c
+k8s.io/apiserver 340247246b5ea93d9b51908592ed8f6b94f82674
+k8s.io/client-go 33bd23f75b6de861994706a322b0afab824b2171
+k8s.io/kubernetes 0caa20c65f147e15f5545862510eb7e81c42b0a3
+k8s.io/utils a99a3e11a96751670db62ba77c6d278d1136931e
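Each vendor.conf entry above is an import path followed by a pinned revision (a release tag such as v1.7.4 or a full commit hash) and, optionally, an override repository URL, as on the golang.org/x/sys line. Purely as an illustration of that format (this sketch is not part of the commit; the file name and the skip-comment convention are assumptions):

```go
package main

import (
    "bufio"
    "fmt"
    "os"
    "strings"
)

func main() {
    // Read vendor.conf-style lines: import path, revision, optional repo URL.
    f, err := os.Open("vendor.conf") // assumed path
    if err != nil {
        panic(err)
    }
    defer f.Close()

    sc := bufio.NewScanner(f)
    for sc.Scan() {
        line := strings.TrimSpace(sc.Text())
        if line == "" || strings.HasPrefix(line, "#") {
            continue // skip blanks and comments
        }
        fields := strings.Fields(line)
        if len(fields) < 2 {
            continue // malformed entry; a real tool would report an error
        }
        pkg, rev := fields[0], fields[1]
        repo := "" // optional third column, e.g. https://github.com/golang/sys
        if len(fields) > 2 {
            repo = fields[2]
        }
        fmt.Println(pkg, rev, repo)
    }
}
```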
vendor/github.com/PuerkitoBio/purell/LICENSE (generated, vendored; 12 deletions)
@@ -1,12 +0,0 @@
-Copyright (c) 2012, Martin Angers
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
-
-* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
-
-* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
-
-* Neither the name of the author nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
vendor/github.com/PuerkitoBio/purell/README.md (generated, vendored; 185 deletions)
@@ -1,185 +0,0 @@
-# Purell
-
-Purell is a tiny Go library to normalize URLs. It returns a pure URL. Pure-ell. Sanitizer and all. Yeah, I know...
-
-Based on the [wikipedia paper][wiki] and the [RFC 3986 document][rfc].
-
-[](http://travis-ci.org/PuerkitoBio/purell)
-
-## Install
-
-`go get github.com/PuerkitoBio/purell`
-
-## Changelog
-
-* **2016-07-27 (v1.0.0)** : Normalize IDN to ASCII (thanks to @zenovich).
-* **2015-02-08** : Add fix for relative paths issue ([PR #5][pr5]) and add fix for unnecessary encoding of reserved characters ([see issue #7][iss7]).
-* **v0.2.0** : Add benchmarks, Attempt IDN support.
-* **v0.1.0** : Initial release.
-
-## Examples
-
-From `example_test.go` (note that in your code, you would import "github.com/PuerkitoBio/purell", and would prefix references to its methods and constants with "purell."):
-
-```go
-package purell
-
-import (
-    "fmt"
-    "net/url"
-)
-
-func ExampleNormalizeURLString() {
-    if normalized, err := NormalizeURLString("hTTp://someWEBsite.com:80/Amazing%3f/url/",
-        FlagLowercaseScheme|FlagLowercaseHost|FlagUppercaseEscapes); err != nil {
-        panic(err)
-    } else {
-        fmt.Print(normalized)
-    }
-    // Output: http://somewebsite.com:80/Amazing%3F/url/
-}
-
-func ExampleMustNormalizeURLString() {
-    normalized := MustNormalizeURLString("hTTpS://someWEBsite.com:443/Amazing%fa/url/",
-        FlagsUnsafeGreedy)
-    fmt.Print(normalized)
-
-    // Output: http://somewebsite.com/Amazing%FA/url
-}
-
-func ExampleNormalizeURL() {
-    if u, err := url.Parse("Http://SomeUrl.com:8080/a/b/.././c///g?c=3&a=1&b=9&c=0#target"); err != nil {
-        panic(err)
-    } else {
-        normalized := NormalizeURL(u, FlagsUsuallySafeGreedy|FlagRemoveDuplicateSlashes|FlagRemoveFragment)
-        fmt.Print(normalized)
-    }
-
-    // Output: http://someurl.com:8080/a/c/g?c=3&a=1&b=9&c=0
-}
-```
-
-## API
-
-As seen in the examples above, purell offers three methods, `NormalizeURLString(string, NormalizationFlags) (string, error)`, `MustNormalizeURLString(string, NormalizationFlags) (string)` and `NormalizeURL(*url.URL, NormalizationFlags) (string)`. They all normalize the provided URL based on the specified flags. Here are the available flags:
-
-```go
-const (
-    // Safe normalizations
-    FlagLowercaseScheme           NormalizationFlags = 1 << iota // HTTP://host -> http://host, applied by default in Go1.1
-    FlagLowercaseHost                                            // http://HOST -> http://host
-    FlagUppercaseEscapes                                         // http://host/t%ef -> http://host/t%EF
-    FlagDecodeUnnecessaryEscapes                                 // http://host/t%41 -> http://host/tA
-    FlagEncodeNecessaryEscapes                                   // http://host/!"#$ -> http://host/%21%22#$
-    FlagRemoveDefaultPort                                        // http://host:80 -> http://host
-    FlagRemoveEmptyQuerySeparator                                // http://host/path? -> http://host/path
-
-    // Usually safe normalizations
-    FlagRemoveTrailingSlash // http://host/path/ -> http://host/path
-    FlagAddTrailingSlash    // http://host/path -> http://host/path/ (should choose only one of these add/remove trailing slash flags)
-    FlagRemoveDotSegments   // http://host/path/./a/b/../c -> http://host/path/a/c
-
-    // Unsafe normalizations
-    FlagRemoveDirectoryIndex   // http://host/path/index.html -> http://host/path/
-    FlagRemoveFragment         // http://host/path#fragment -> http://host/path
-    FlagForceHTTP              // https://host -> http://host
-    FlagRemoveDuplicateSlashes // http://host/path//a///b -> http://host/path/a/b
-    FlagRemoveWWW              // http://www.host/ -> http://host/
-    FlagAddWWW                 // http://host/ -> http://www.host/ (should choose only one of these add/remove WWW flags)
-    FlagSortQuery              // http://host/path?c=3&b=2&a=1&b=1 -> http://host/path?a=1&b=1&b=2&c=3
-
-    // Normalizations not in the wikipedia article, required to cover tests cases
-    // submitted by jehiah
-    FlagDecodeDWORDHost           // http://1113982867 -> http://66.102.7.147
-    FlagDecodeOctalHost           // http://0102.0146.07.0223 -> http://66.102.7.147
-    FlagDecodeHexHost             // http://0x42660793 -> http://66.102.7.147
-    FlagRemoveUnnecessaryHostDots // http://.host../path -> http://host/path
-    FlagRemoveEmptyPortSeparator  // http://host:/path -> http://host/path
-
-    // Convenience set of safe normalizations
-    FlagsSafe NormalizationFlags = FlagLowercaseHost | FlagLowercaseScheme | FlagUppercaseEscapes | FlagDecodeUnnecessaryEscapes | FlagEncodeNecessaryEscapes | FlagRemoveDefaultPort | FlagRemoveEmptyQuerySeparator
-
-    // For convenience sets, "greedy" uses the "remove trailing slash" and "remove www. prefix" flags,
-    // while "non-greedy" uses the "add (or keep) the trailing slash" and "add www. prefix".
-
-    // Convenience set of usually safe normalizations (includes FlagsSafe)
-    FlagsUsuallySafeGreedy    NormalizationFlags = FlagsSafe | FlagRemoveTrailingSlash | FlagRemoveDotSegments
-    FlagsUsuallySafeNonGreedy NormalizationFlags = FlagsSafe | FlagAddTrailingSlash | FlagRemoveDotSegments
-
-    // Convenience set of unsafe normalizations (includes FlagsUsuallySafe)
-    FlagsUnsafeGreedy    NormalizationFlags = FlagsUsuallySafeGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagRemoveWWW | FlagSortQuery
-    FlagsUnsafeNonGreedy NormalizationFlags = FlagsUsuallySafeNonGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagAddWWW | FlagSortQuery
-
-    // Convenience set of all available flags
-    FlagsAllGreedy    = FlagsUnsafeGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator
-    FlagsAllNonGreedy = FlagsUnsafeNonGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator
-)
-```
-
-For convenience, the set of flags `FlagsSafe`, `FlagsUsuallySafe[Greedy|NonGreedy]`, `FlagsUnsafe[Greedy|NonGreedy]` and `FlagsAll[Greedy|NonGreedy]` are provided for the similarly grouped normalizations on [wikipedia's URL normalization page][wiki]. You can add (using the bitwise OR `|` operator) or remove (using the bitwise AND NOT `&^` operator) individual flags from the sets if required, to build your own custom set.
-
-The [full godoc reference is available on gopkgdoc][godoc].
-
-Some things to note:
-
-* `FlagDecodeUnnecessaryEscapes`, `FlagEncodeNecessaryEscapes`, `FlagUppercaseEscapes` and `FlagRemoveEmptyQuerySeparator` are always implicitly set, because internally, the URL string is parsed as an URL object, which automatically decodes unnecessary escapes, uppercases and encodes necessary ones, and removes empty query separators (an unnecessary `?` at the end of the url). So this operation cannot **not** be done. For this reason, `FlagRemoveEmptyQuerySeparator` (as well as the other three) has been included in the `FlagsSafe` convenience set, instead of `FlagsUnsafe`, where Wikipedia puts it.
-
-* The `FlagDecodeUnnecessaryEscapes` decodes the following escapes (*from -> to*):
-    - %24 -> $
-    - %26 -> &
-    - %2B-%3B -> +,-./0123456789:;
-    - %3D -> =
-    - %40-%5A -> @ABCDEFGHIJKLMNOPQRSTUVWXYZ
-    - %5F -> _
-    - %61-%7A -> abcdefghijklmnopqrstuvwxyz
-    - %7E -> ~
-
-
-* When the `NormalizeURL` function is used (passing an URL object), this source URL object is modified (that is, after the call, the URL object will be modified to reflect the normalization).
-
-* The *replace IP with domain name* normalization (`http://208.77.188.166/ → http://www.example.com/`) is obviously not possible for a library without making some network requests. This is not implemented in purell.
-
-* The *remove unused query string parameters* and *remove default query parameters* are also not implemented, since this is a very case-specific normalization, and it is quite trivial to do with an URL object.
-
-### Safe vs Usually Safe vs Unsafe
-
-Purell allows you to control the level of risk you take while normalizing an URL. You can aggressively normalize, play it totally safe, or anything in between.
-
-Consider the following URL:
-
-`HTTPS://www.RooT.com/toto/t%45%1f///a/./b/../c/?z=3&w=2&a=4&w=1#invalid`
-
-Normalizing with the `FlagsSafe` gives:
-
-`https://www.root.com/toto/tE%1F///a/./b/../c/?z=3&w=2&a=4&w=1#invalid`
-
-With the `FlagsUsuallySafeGreedy`:
-
-`https://www.root.com/toto/tE%1F///a/c?z=3&w=2&a=4&w=1#invalid`
-
-And with `FlagsUnsafeGreedy`:
-
-`http://root.com/toto/tE%1F/a/c?a=4&w=1&w=2&z=3`
-
-## TODOs
-
-* Add a class/default instance to allow specifying custom directory index names? At the moment, removing directory index removes `(^|/)((?:default|index)\.\w{1,4})$`.
-
-## Thanks / Contributions
-
-@rogpeppe
-@jehiah
-@opennota
-@pchristopher1275
-@zenovich
-
-## License
-
-The [BSD 3-Clause license][bsd].
-
-[bsd]: http://opensource.org/licenses/BSD-3-Clause
-[wiki]: http://en.wikipedia.org/wiki/URL_normalization
-[rfc]: http://tools.ietf.org/html/rfc3986#section-6
-[godoc]: http://go.pkgdoc.org/github.com/PuerkitoBio/purell
-[pr5]: https://github.com/PuerkitoBio/purell/pull/5
-[iss7]: https://github.com/PuerkitoBio/purell/issues/7
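The examples in the README above live inside the purell package itself (example_test.go). For context on what this commit removes, here is a hedged, standalone sketch of calling the same documented API from an importing package; the sample URL and flag combination are assumptions, not code from the repository:

```go
package main

import (
    "fmt"

    "github.com/PuerkitoBio/purell"
)

func main() {
    // Combine a convenience set with individual flags, as the README suggests.
    normalized, err := purell.NormalizeURLString(
        "HTTP://www.Example.com:80/a/b/../c?b=2&a=1",
        purell.FlagsSafe|purell.FlagRemoveDotSegments|purell.FlagSortQuery,
    )
    if err != nil {
        panic(err)
    }
    fmt.Println(normalized) // expected: http://www.example.com/a/c?a=1&b=2
}
```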
vendor/github.com/PuerkitoBio/purell/purell.go (generated, vendored; 375 deletions)
@@ -1,375 +0,0 @@
-/*
-Package purell offers URL normalization as described on the wikipedia page:
-http://en.wikipedia.org/wiki/URL_normalization
-*/
-package purell
-
-import (
-    "bytes"
-    "fmt"
-    "net/url"
-    "regexp"
-    "sort"
-    "strconv"
-    "strings"
-
-    "github.com/PuerkitoBio/urlesc"
-    "golang.org/x/net/idna"
-    "golang.org/x/text/secure/precis"
-    "golang.org/x/text/unicode/norm"
-)
-
-// A set of normalization flags determines how a URL will
-// be normalized.
-type NormalizationFlags uint
-
-const (
-    // Safe normalizations
-    FlagLowercaseScheme           NormalizationFlags = 1 << iota // HTTP://host -> http://host, applied by default in Go1.1
-    FlagLowercaseHost                                            // http://HOST -> http://host
-    FlagUppercaseEscapes                                         // http://host/t%ef -> http://host/t%EF
-    FlagDecodeUnnecessaryEscapes                                 // http://host/t%41 -> http://host/tA
-    FlagEncodeNecessaryEscapes                                   // http://host/!"#$ -> http://host/%21%22#$
-    FlagRemoveDefaultPort                                        // http://host:80 -> http://host
-    FlagRemoveEmptyQuerySeparator                                // http://host/path? -> http://host/path
-
-    // Usually safe normalizations
-    FlagRemoveTrailingSlash // http://host/path/ -> http://host/path
-    FlagAddTrailingSlash    // http://host/path -> http://host/path/ (should choose only one of these add/remove trailing slash flags)
-    FlagRemoveDotSegments   // http://host/path/./a/b/../c -> http://host/path/a/c
-
-    // Unsafe normalizations
-    FlagRemoveDirectoryIndex   // http://host/path/index.html -> http://host/path/
-    FlagRemoveFragment         // http://host/path#fragment -> http://host/path
-    FlagForceHTTP              // https://host -> http://host
-    FlagRemoveDuplicateSlashes // http://host/path//a///b -> http://host/path/a/b
-    FlagRemoveWWW              // http://www.host/ -> http://host/
-    FlagAddWWW                 // http://host/ -> http://www.host/ (should choose only one of these add/remove WWW flags)
-    FlagSortQuery              // http://host/path?c=3&b=2&a=1&b=1 -> http://host/path?a=1&b=1&b=2&c=3
-
-    // Normalizations not in the wikipedia article, required to cover tests cases
-    // submitted by jehiah
-    FlagDecodeDWORDHost           // http://1113982867 -> http://66.102.7.147
-    FlagDecodeOctalHost           // http://0102.0146.07.0223 -> http://66.102.7.147
-    FlagDecodeHexHost             // http://0x42660793 -> http://66.102.7.147
-    FlagRemoveUnnecessaryHostDots // http://.host../path -> http://host/path
-    FlagRemoveEmptyPortSeparator  // http://host:/path -> http://host/path
-
-    // Convenience set of safe normalizations
-    FlagsSafe NormalizationFlags = FlagLowercaseHost | FlagLowercaseScheme | FlagUppercaseEscapes | FlagDecodeUnnecessaryEscapes | FlagEncodeNecessaryEscapes | FlagRemoveDefaultPort | FlagRemoveEmptyQuerySeparator
-
-    // For convenience sets, "greedy" uses the "remove trailing slash" and "remove www. prefix" flags,
-    // while "non-greedy" uses the "add (or keep) the trailing slash" and "add www. prefix".
-
-    // Convenience set of usually safe normalizations (includes FlagsSafe)
-    FlagsUsuallySafeGreedy    NormalizationFlags = FlagsSafe | FlagRemoveTrailingSlash | FlagRemoveDotSegments
-    FlagsUsuallySafeNonGreedy NormalizationFlags = FlagsSafe | FlagAddTrailingSlash | FlagRemoveDotSegments
-
-    // Convenience set of unsafe normalizations (includes FlagsUsuallySafe)
-    FlagsUnsafeGreedy    NormalizationFlags = FlagsUsuallySafeGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagRemoveWWW | FlagSortQuery
-    FlagsUnsafeNonGreedy NormalizationFlags = FlagsUsuallySafeNonGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagAddWWW | FlagSortQuery
-
-    // Convenience set of all available flags
-    FlagsAllGreedy    = FlagsUnsafeGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator
-    FlagsAllNonGreedy = FlagsUnsafeNonGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator
-)
-
-const (
-    defaultHttpPort  = ":80"
-    defaultHttpsPort = ":443"
-)
-
-// Regular expressions used by the normalizations
-var rxPort = regexp.MustCompile(`(:\d+)/?$`)
-var rxDirIndex = regexp.MustCompile(`(^|/)((?:default|index)\.\w{1,4})$`)
-var rxDupSlashes = regexp.MustCompile(`/{2,}`)
-var rxDWORDHost = regexp.MustCompile(`^(\d+)((?:\.+)?(?:\:\d*)?)$`)
-var rxOctalHost = regexp.MustCompile(`^(0\d*)\.(0\d*)\.(0\d*)\.(0\d*)((?:\.+)?(?:\:\d*)?)$`)
-var rxHexHost = regexp.MustCompile(`^0x([0-9A-Fa-f]+)((?:\.+)?(?:\:\d*)?)$`)
-var rxHostDots = regexp.MustCompile(`^(.+?)(:\d+)?$`)
-var rxEmptyPort = regexp.MustCompile(`:+$`)
-
-// Map of flags to implementation function.
-// FlagDecodeUnnecessaryEscapes has no action, since it is done automatically
-// by parsing the string as an URL. Same for FlagUppercaseEscapes and FlagRemoveEmptyQuerySeparator.
-
-// Since maps have undefined traversing order, make a slice of ordered keys
-var flagsOrder = []NormalizationFlags{
-    FlagLowercaseScheme,
-    FlagLowercaseHost,
-    FlagRemoveDefaultPort,
-    FlagRemoveDirectoryIndex,
-    FlagRemoveDotSegments,
-    FlagRemoveFragment,
-    FlagForceHTTP, // Must be after remove default port (because https=443/http=80)
-    FlagRemoveDuplicateSlashes,
-    FlagRemoveWWW,
-    FlagAddWWW,
-    FlagSortQuery,
-    FlagDecodeDWORDHost,
-    FlagDecodeOctalHost,
-    FlagDecodeHexHost,
-    FlagRemoveUnnecessaryHostDots,
-    FlagRemoveEmptyPortSeparator,
-    FlagRemoveTrailingSlash, // These two (add/remove trailing slash) must be last
-    FlagAddTrailingSlash,
-}
-
-// ... and then the map, where order is unimportant
-var flags = map[NormalizationFlags]func(*url.URL){
-    FlagLowercaseScheme:           lowercaseScheme,
-    FlagLowercaseHost:             lowercaseHost,
-    FlagRemoveDefaultPort:         removeDefaultPort,
-    FlagRemoveDirectoryIndex:      removeDirectoryIndex,
-    FlagRemoveDotSegments:         removeDotSegments,
-    FlagRemoveFragment:            removeFragment,
-    FlagForceHTTP:                 forceHTTP,
-    FlagRemoveDuplicateSlashes:    removeDuplicateSlashes,
-    FlagRemoveWWW:                 removeWWW,
-    FlagAddWWW:                    addWWW,
-    FlagSortQuery:                 sortQuery,
-    FlagDecodeDWORDHost:           decodeDWORDHost,
-    FlagDecodeOctalHost:           decodeOctalHost,
-    FlagDecodeHexHost:             decodeHexHost,
-    FlagRemoveUnnecessaryHostDots: removeUnncessaryHostDots,
-    FlagRemoveEmptyPortSeparator:  removeEmptyPortSeparator,
-    FlagRemoveTrailingSlash:       removeTrailingSlash,
-    FlagAddTrailingSlash:          addTrailingSlash,
-}
-
-// MustNormalizeURLString returns the normalized string, and panics if an error occurs.
-// It takes an URL string as input, as well as the normalization flags.
-func MustNormalizeURLString(u string, f NormalizationFlags) string {
-    result, e := NormalizeURLString(u, f)
-    if e != nil {
-        panic(e)
-    }
-    return result
-}
-
-// NormalizeURLString returns the normalized string, or an error if it can't be parsed into an URL object.
-// It takes an URL string as input, as well as the normalization flags.
-func NormalizeURLString(u string, f NormalizationFlags) (string, error) {
-    if parsed, e := url.Parse(u); e != nil {
-        return "", e
-    } else {
-        options := make([]precis.Option, 1, 3)
-        options[0] = precis.IgnoreCase
-        if f&FlagLowercaseHost == FlagLowercaseHost {
-            options = append(options, precis.FoldCase())
-        }
-        options = append(options, precis.Norm(norm.NFC))
-        profile := precis.NewFreeform(options...)
-        if parsed.Host, e = idna.ToASCII(profile.NewTransformer().String(parsed.Host)); e != nil {
-            return "", e
-        }
-        return NormalizeURL(parsed, f), nil
-    }
-    panic("Unreachable code.")
-}
-
-// NormalizeURL returns the normalized string.
-// It takes a parsed URL object as input, as well as the normalization flags.
-func NormalizeURL(u *url.URL, f NormalizationFlags) string {
-    for _, k := range flagsOrder {
-        if f&k == k {
-            flags[k](u)
-        }
-    }
-    return urlesc.Escape(u)
-}
-
-func lowercaseScheme(u *url.URL) {
-    if len(u.Scheme) > 0 {
-        u.Scheme = strings.ToLower(u.Scheme)
-    }
-}
-
-func lowercaseHost(u *url.URL) {
-    if len(u.Host) > 0 {
-        u.Host = strings.ToLower(u.Host)
-    }
-}
-
-func removeDefaultPort(u *url.URL) {
-    if len(u.Host) > 0 {
-        scheme := strings.ToLower(u.Scheme)
-        u.Host = rxPort.ReplaceAllStringFunc(u.Host, func(val string) string {
-            if (scheme == "http" && val == defaultHttpPort) || (scheme == "https" && val == defaultHttpsPort) {
-                return ""
-            }
-            return val
-        })
-    }
-}
-
-func removeTrailingSlash(u *url.URL) {
-    if l := len(u.Path); l > 0 {
-        if strings.HasSuffix(u.Path, "/") {
-            u.Path = u.Path[:l-1]
-        }
-    } else if l = len(u.Host); l > 0 {
-        if strings.HasSuffix(u.Host, "/") {
-            u.Host = u.Host[:l-1]
-        }
-    }
-}
-
-func addTrailingSlash(u *url.URL) {
-    if l := len(u.Path); l > 0 {
-        if !strings.HasSuffix(u.Path, "/") {
-            u.Path += "/"
-        }
-    } else if l = len(u.Host); l > 0 {
-        if !strings.HasSuffix(u.Host, "/") {
-            u.Host += "/"
-        }
-    }
-}
-
-func removeDotSegments(u *url.URL) {
-    if len(u.Path) > 0 {
-        var dotFree []string
-        var lastIsDot bool
-
-        sections := strings.Split(u.Path, "/")
-        for _, s := range sections {
-            if s == ".." {
-                if len(dotFree) > 0 {
-                    dotFree = dotFree[:len(dotFree)-1]
-                }
-            } else if s != "." {
-                dotFree = append(dotFree, s)
-            }
-            lastIsDot = (s == "." || s == "..")
-        }
-        // Special case if host does not end with / and new path does not begin with /
-        u.Path = strings.Join(dotFree, "/")
-        if u.Host != "" && !strings.HasSuffix(u.Host, "/") && !strings.HasPrefix(u.Path, "/") {
-            u.Path = "/" + u.Path
-        }
-        // Special case if the last segment was a dot, make sure the path ends with a slash
-        if lastIsDot && !strings.HasSuffix(u.Path, "/") {
-            u.Path += "/"
-        }
-    }
-}
-
-func removeDirectoryIndex(u *url.URL) {
-    if len(u.Path) > 0 {
-        u.Path = rxDirIndex.ReplaceAllString(u.Path, "$1")
-    }
-}
-
-func removeFragment(u *url.URL) {
-    u.Fragment = ""
-}
-
-func forceHTTP(u *url.URL) {
-    if strings.ToLower(u.Scheme) == "https" {
-        u.Scheme = "http"
-    }
-}
-
-func removeDuplicateSlashes(u *url.URL) {
-    if len(u.Path) > 0 {
-        u.Path = rxDupSlashes.ReplaceAllString(u.Path, "/")
-    }
-}
-
-func removeWWW(u *url.URL) {
-    if len(u.Host) > 0 && strings.HasPrefix(strings.ToLower(u.Host), "www.") {
-        u.Host = u.Host[4:]
-    }
-}
-
-func addWWW(u *url.URL) {
-    if len(u.Host) > 0 && !strings.HasPrefix(strings.ToLower(u.Host), "www.") {
-        u.Host = "www." + u.Host
-    }
-}
-
-func sortQuery(u *url.URL) {
-    q := u.Query()
-
-    if len(q) > 0 {
-        arKeys := make([]string, len(q))
-        i := 0
-        for k, _ := range q {
-            arKeys[i] = k
-            i++
-        }
-        sort.Strings(arKeys)
-        buf := new(bytes.Buffer)
-        for _, k := range arKeys {
-            sort.Strings(q[k])
-            for _, v := range q[k] {
-                if buf.Len() > 0 {
-                    buf.WriteRune('&')
-                }
-                buf.WriteString(fmt.Sprintf("%s=%s", k, urlesc.QueryEscape(v)))
-            }
-        }
-
-        // Rebuild the raw query string
-        u.RawQuery = buf.String()
-    }
-}
-
-func decodeDWORDHost(u *url.URL) {
-    if len(u.Host) > 0 {
-        if matches := rxDWORDHost.FindStringSubmatch(u.Host); len(matches) > 2 {
-            var parts [4]int64
-
-            dword, _ := strconv.ParseInt(matches[1], 10, 0)
-            for i, shift := range []uint{24, 16, 8, 0} {
-                parts[i] = dword >> shift & 0xFF
-            }
-            u.Host = fmt.Sprintf("%d.%d.%d.%d%s", parts[0], parts[1], parts[2], parts[3], matches[2])
-        }
-    }
-}
-
-func decodeOctalHost(u *url.URL) {
-    if len(u.Host) > 0 {
-        if matches := rxOctalHost.FindStringSubmatch(u.Host); len(matches) > 5 {
-            var parts [4]int64
-
-            for i := 1; i <= 4; i++ {
-                parts[i-1], _ = strconv.ParseInt(matches[i], 8, 0)
-            }
-            u.Host = fmt.Sprintf("%d.%d.%d.%d%s", parts[0], parts[1], parts[2], parts[3], matches[5])
-        }
-    }
-}
-
-func decodeHexHost(u *url.URL) {
-    if len(u.Host) > 0 {
-        if matches := rxHexHost.FindStringSubmatch(u.Host); len(matches) > 2 {
-            // Conversion is safe because of regex validation
-            parsed, _ := strconv.ParseInt(matches[1], 16, 0)
-            // Set host as DWORD (base 10) encoded host
-            u.Host = fmt.Sprintf("%d%s", parsed, matches[2])
-            // The rest is the same as decoding a DWORD host
-            decodeDWORDHost(u)
-        }
-    }
-}
-
-func removeUnncessaryHostDots(u *url.URL) {
-    if len(u.Host) > 0 {
-        if matches := rxHostDots.FindStringSubmatch(u.Host); len(matches) > 1 {
-            // Trim the leading and trailing dots
-            u.Host = strings.Trim(matches[1], ".")
-            if len(matches) > 2 {
-                u.Host += matches[2]
-            }
-        }
-    }
-}
-
-func removeEmptyPortSeparator(u *url.URL) {
-    if len(u.Host) > 0 {
-        u.Host = rxEmptyPort.ReplaceAllString(u.Host, "")
-    }
-}
vendor/github.com/PuerkitoBio/urlesc/README.md (generated, vendored; 16 deletions)
@@ -1,16 +0,0 @@
-urlesc [](https://travis-ci.org/PuerkitoBio/urlesc) [](http://godoc.org/github.com/PuerkitoBio/urlesc)
-======
-
-Package urlesc implements query escaping as per RFC 3986.
-
-It contains some parts of the net/url package, modified so as to allow
-some reserved characters incorrectly escaped by net/url (see [issue 5684](https://github.com/golang/go/issues/5684)).
-
-## Install
-
-    go get github.com/PuerkitoBio/urlesc
-
-## License
-
-Go license (BSD-3-Clause)
-
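For context, a hedged sketch of the two exported helpers of the removed package (QueryEscape and Escape, whose definitions appear in urlesc.go below); the sample inputs and expected outputs are illustrative assumptions:

```go
package main

import (
    "fmt"
    "net/url"

    "github.com/PuerkitoBio/urlesc"
)

func main() {
    // Unlike net/url.QueryEscape, urlesc leaves '/' and '?' literal in a query
    // component, per RFC 3986 §3.4; spaces still become '+'.
    fmt.Println(urlesc.QueryEscape("a b/c?d")) // expected: a+b/c?d

    // Escape reassembles a parsed URL using RFC 3986 escaping rules.
    u, err := url.Parse("http://example.com/path to/file")
    if err != nil {
        panic(err)
    }
    fmt.Println(urlesc.Escape(u)) // expected: http://example.com/path%20to/file
}
```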
vendor/github.com/PuerkitoBio/urlesc/urlesc.go (generated, vendored; 180 deletions)
@@ -1,180 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package urlesc implements query escaping as per RFC 3986.
-// It contains some parts of the net/url package, modified so as to allow
-// some reserved characters incorrectly escaped by net/url.
-// See https://github.com/golang/go/issues/5684
-package urlesc
-
-import (
-    "bytes"
-    "net/url"
-    "strings"
-)
-
-type encoding int
-
-const (
-    encodePath encoding = 1 + iota
-    encodeUserPassword
-    encodeQueryComponent
-    encodeFragment
-)
-
-// Return true if the specified character should be escaped when
-// appearing in a URL string, according to RFC 3986.
-func shouldEscape(c byte, mode encoding) bool {
-    // §2.3 Unreserved characters (alphanum)
-    if 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' {
-        return false
-    }
-
-    switch c {
-    case '-', '.', '_', '~': // §2.3 Unreserved characters (mark)
-        return false
-
-    // §2.2 Reserved characters (reserved)
-    case ':', '/', '?', '#', '[', ']', '@', // gen-delims
-        '!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=': // sub-delims
-        // Different sections of the URL allow a few of
-        // the reserved characters to appear unescaped.
-        switch mode {
-        case encodePath: // §3.3
-            // The RFC allows sub-delims and : @.
-            // '/', '[' and ']' can be used to assign meaning to individual path
-            // segments. This package only manipulates the path as a whole,
-            // so we allow those as well. That leaves only ? and # to escape.
-            return c == '?' || c == '#'
-
-        case encodeUserPassword: // §3.2.1
-            // The RFC allows : and sub-delims in
-            // userinfo. The parsing of userinfo treats ':' as special so we must escape
-            // all the gen-delims.
-            return c == ':' || c == '/' || c == '?' || c == '#' || c == '[' || c == ']' || c == '@'
-
-        case encodeQueryComponent: // §3.4
-            // The RFC allows / and ?.
-            return c != '/' && c != '?'
-
-        case encodeFragment: // §4.1
-            // The RFC text is silent but the grammar allows
-            // everything, so escape nothing but #
-            return c == '#'
-        }
-    }
-
-    // Everything else must be escaped.
-    return true
-}
-
-// QueryEscape escapes the string so it can be safely placed
-// inside a URL query.
-func QueryEscape(s string) string {
-    return escape(s, encodeQueryComponent)
-}
-
-func escape(s string, mode encoding) string {
-    spaceCount, hexCount := 0, 0
-    for i := 0; i < len(s); i++ {
-        c := s[i]
-        if shouldEscape(c, mode) {
-            if c == ' ' && mode == encodeQueryComponent {
-                spaceCount++
-            } else {
-                hexCount++
-            }
-        }
-    }
-
-    if spaceCount == 0 && hexCount == 0 {
-        return s
-    }
-
-    t := make([]byte, len(s)+2*hexCount)
-    j := 0
-    for i := 0; i < len(s); i++ {
-        switch c := s[i]; {
-        case c == ' ' && mode == encodeQueryComponent:
-            t[j] = '+'
-            j++
-        case shouldEscape(c, mode):
-            t[j] = '%'
-            t[j+1] = "0123456789ABCDEF"[c>>4]
-            t[j+2] = "0123456789ABCDEF"[c&15]
-            j += 3
-        default:
-            t[j] = s[i]
-            j++
-        }
-    }
-    return string(t)
-}
-
-var uiReplacer = strings.NewReplacer(
-    "%21", "!",
-    "%27", "'",
-    "%28", "(",
-    "%29", ")",
-    "%2A", "*",
-)
-
-// unescapeUserinfo unescapes some characters that need not to be escaped as per RFC3986.
-func unescapeUserinfo(s string) string {
-    return uiReplacer.Replace(s)
-}
-
-// Escape reassembles the URL into a valid URL string.
-// The general form of the result is one of:
-//
-//    scheme:opaque
-//    scheme://userinfo@host/path?query#fragment
-//
-// If u.Opaque is non-empty, String uses the first form;
-// otherwise it uses the second form.
-//
-// In the second form, the following rules apply:
-//    - if u.Scheme is empty, scheme: is omitted.
-//    - if u.User is nil, userinfo@ is omitted.
-//    - if u.Host is empty, host/ is omitted.
-//    - if u.Scheme and u.Host are empty and u.User is nil,
-//      the entire scheme://userinfo@host/ is omitted.
-//    - if u.Host is non-empty and u.Path begins with a /,
-//      the form host/path does not add its own /.
-//    - if u.RawQuery is empty, ?query is omitted.
-//    - if u.Fragment is empty, #fragment is omitted.
-func Escape(u *url.URL) string {
-    var buf bytes.Buffer
-    if u.Scheme != "" {
-        buf.WriteString(u.Scheme)
-        buf.WriteByte(':')
-    }
-    if u.Opaque != "" {
-        buf.WriteString(u.Opaque)
-    } else {
-        if u.Scheme != "" || u.Host != "" || u.User != nil {
-            buf.WriteString("//")
-            if ui := u.User; ui != nil {
-                buf.WriteString(unescapeUserinfo(ui.String()))
-                buf.WriteByte('@')
-            }
-            if h := u.Host; h != "" {
-                buf.WriteString(h)
-            }
-        }
-        if u.Path != "" && u.Path[0] != '/' && u.Host != "" {
-            buf.WriteByte('/')
-        }
-        buf.WriteString(escape(u.Path, encodePath))
-    }
-    if u.RawQuery != "" {
-        buf.WriteByte('?')
-        buf.WriteString(u.RawQuery)
-    }
-    if u.Fragment != "" {
-        buf.WriteByte('#')
-        buf.WriteString(escape(u.Fragment, encodeFragment))
-    }
-    return buf.String()
-}
vendor/github.com/go-openapi/jsonpointer/LICENSE (generated, vendored; 202 deletions)
@@ -1,202 +0,0 @@
-(Standard Apache License, Version 2.0 text, January 2004, http://www.apache.org/licenses/ — removed in full, 202 lines.)
vendor/github.com/go-openapi/jsonpointer/README.md (generated, vendored; 15 deletions)
@@ -1,15 +0,0 @@
-# gojsonpointer [](https://ci.vmware.run/go-openapi/jsonpointer) [](https://coverage.vmware.run/go-openapi/jsonpointer) [](https://slackin.goswagger.io)
-
-[](https://raw.githubusercontent.com/go-openapi/jsonpointer/master/LICENSE) [](http://godoc.org/github.com/go-openapi/jsonpointer)
-An implementation of JSON Pointer - Go language
-
-## Status
-Completed YES
-
-Tested YES
-
-## References
-http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07
-
-### Note
-The 4.Evaluation part of the previous reference, starting with 'If the currently referenced value is a JSON array, the reference token MUST contain either...' is not implemented.
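For context, a hedged sketch of the removed package's documented entry points (New and Get, whose signatures appear in pointer.go below); the sample document and pointer string are assumptions:

```go
package main

import (
    "fmt"

    "github.com/go-openapi/jsonpointer"
)

func main() {
    // An unmarshalled JSON document, as map[string]interface{}.
    doc := map[string]interface{}{
        "spec": map[string]interface{}{
            "replicas": 3,
        },
    }

    // New parses the pointer string into reference tokens.
    p, err := jsonpointer.New("/spec/replicas")
    if err != nil {
        panic(err)
    }

    // Get walks the document token by token and returns the value,
    // its reflect.Kind, and an error for missing keys.
    v, kind, err := p.Get(doc)
    if err != nil {
        panic(err)
    }
    fmt.Println(v, kind) // expected value: 3
}
```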
vendor/github.com/go-openapi/jsonpointer/pointer.go (generated, vendored; 238 deletions)
@ -1,238 +0,0 @@
// Copyright 2013 sigu-399 ( https://github.com/sigu-399 )
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// author       sigu-399
// author-github  https://github.com/sigu-399
// author-mail    sigu.399@gmail.com
//
// repository-name  jsonpointer
// repository-desc  An implementation of JSON Pointer - Go language
//
// description    Main and unique file.
//
// created        25-02-2013

package jsonpointer

import (
	"errors"
	"fmt"
	"reflect"
	"strconv"
	"strings"

	"github.com/go-openapi/swag"
)

const (
	emptyPointer     = ``
	pointerSeparator = `/`

	invalidStart = `JSON pointer must be empty or start with a "` + pointerSeparator
)

var jsonPointableType = reflect.TypeOf(new(JSONPointable)).Elem()

// JSONPointable is an interface for structs to implement when they need to customize the
// json pointer process
type JSONPointable interface {
	JSONLookup(string) (interface{}, error)
}

type implStruct struct {
	mode string // "SET" or "GET"

	inDocument interface{}

	setInValue interface{}

	getOutNode interface{}
	getOutKind reflect.Kind
	outError   error
}

// New creates a new json pointer for the given string
func New(jsonPointerString string) (Pointer, error) {

	var p Pointer
	err := p.parse(jsonPointerString)
	return p, err

}

// Pointer the json pointer representation
type Pointer struct {
	referenceTokens []string
}

// "Constructor", parses the given string JSON pointer
func (p *Pointer) parse(jsonPointerString string) error {

	var err error

	if jsonPointerString != emptyPointer {
		if !strings.HasPrefix(jsonPointerString, pointerSeparator) {
			err = errors.New(invalidStart)
		} else {
			referenceTokens := strings.Split(jsonPointerString, pointerSeparator)
			for _, referenceToken := range referenceTokens[1:] {
				p.referenceTokens = append(p.referenceTokens, referenceToken)
			}
		}
	}

	return err
}

// Get uses the pointer to retrieve a value from a JSON document
func (p *Pointer) Get(document interface{}) (interface{}, reflect.Kind, error) {
	return p.get(document, swag.DefaultJSONNameProvider)
}

// GetForToken gets a value for a json pointer token 1 level deep
func GetForToken(document interface{}, decodedToken string) (interface{}, reflect.Kind, error) {
	return getSingleImpl(document, decodedToken, swag.DefaultJSONNameProvider)
}

func getSingleImpl(node interface{}, decodedToken string, nameProvider *swag.NameProvider) (interface{}, reflect.Kind, error) {
	kind := reflect.Invalid
	rValue := reflect.Indirect(reflect.ValueOf(node))
	kind = rValue.Kind()
	switch kind {

	case reflect.Struct:
		if rValue.Type().Implements(jsonPointableType) {
			r, err := node.(JSONPointable).JSONLookup(decodedToken)
			if err != nil {
				return nil, kind, err
			}
			return r, kind, nil
		}
		nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken)
		if !ok {
			return nil, kind, fmt.Errorf("object has no field %q", decodedToken)
		}
		fld := rValue.FieldByName(nm)
		return fld.Interface(), kind, nil

	case reflect.Map:
		kv := reflect.ValueOf(decodedToken)
		mv := rValue.MapIndex(kv)
		if mv.IsValid() && !swag.IsZero(mv) {
			return mv.Interface(), kind, nil
		}
		return nil, kind, fmt.Errorf("object has no key %q", decodedToken)

	case reflect.Slice:
		tokenIndex, err := strconv.Atoi(decodedToken)
		if err != nil {
			return nil, kind, err
		}
		sLength := rValue.Len()
		if tokenIndex < 0 || tokenIndex >= sLength {
			return nil, kind, fmt.Errorf("index out of bounds array[0,%d] index '%d'", sLength, tokenIndex)
		}

		elem := rValue.Index(tokenIndex)
		return elem.Interface(), kind, nil

	default:
		return nil, kind, fmt.Errorf("invalid token reference %q", decodedToken)
	}

}

func (p *Pointer) get(node interface{}, nameProvider *swag.NameProvider) (interface{}, reflect.Kind, error) {

	if nameProvider == nil {
		nameProvider = swag.DefaultJSONNameProvider
	}

	kind := reflect.Invalid

	// Full document when empty
	if len(p.referenceTokens) == 0 {
		return node, kind, nil
	}

	for _, token := range p.referenceTokens {

		decodedToken := Unescape(token)

		r, knd, err := getSingleImpl(node, decodedToken, nameProvider)
		if err != nil {
			return nil, knd, err
		}
		node, kind = r, knd

	}

	rValue := reflect.ValueOf(node)
	kind = rValue.Kind()

	return node, kind, nil
}

// DecodedTokens returns the decoded tokens
func (p *Pointer) DecodedTokens() []string {
	result := make([]string, 0, len(p.referenceTokens))
	for _, t := range p.referenceTokens {
		result = append(result, Unescape(t))
	}
	return result
}

// IsEmpty returns true if this is an empty json pointer
// this indicates that it points to the root document
func (p *Pointer) IsEmpty() bool {
	return len(p.referenceTokens) == 0
}

// Pointer to string representation function
func (p *Pointer) String() string {

	if len(p.referenceTokens) == 0 {
		return emptyPointer
	}

	pointerString := pointerSeparator + strings.Join(p.referenceTokens, pointerSeparator)

	return pointerString
}

// Specific JSON pointer encoding here
// ~0 => ~
// ~1 => /
// ... and vice versa

const (
	encRefTok0 = `~0`
	encRefTok1 = `~1`
	decRefTok0 = `~`
	decRefTok1 = `/`
)

// Unescape unescapes a json pointer reference token string to the original representation
func Unescape(token string) string {
	step1 := strings.Replace(token, encRefTok1, decRefTok1, -1)
	step2 := strings.Replace(step1, encRefTok0, decRefTok0, -1)
	return step2
}

// Escape escapes a pointer reference token string
func Escape(token string) string {
	step1 := strings.Replace(token, decRefTok0, encRefTok0, -1)
	step2 := strings.Replace(step1, decRefTok1, encRefTok1, -1)
	return step2
}
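The JSONPointable interface above is the package's extension point: a struct can bypass reflection-based field lookup by resolving tokens itself. A hedged sketch, with the Metadata type and its labels invented for illustration:

```go
package main

import (
	"fmt"

	"github.com/go-openapi/jsonpointer"
)

// Metadata is a hypothetical node type that resolves pointer tokens
// against an internal map instead of exported struct fields.
type Metadata struct {
	labels map[string]string
}

// JSONLookup satisfies jsonpointer.JSONPointable; getSingleImpl invokes it
// for struct nodes before falling back to reflection on field names.
func (m Metadata) JSONLookup(token string) (interface{}, error) {
	if v, ok := m.labels[token]; ok {
		return v, nil
	}
	return nil, fmt.Errorf("no label %q", token)
}

func main() {
	doc := map[string]interface{}{
		"meta": Metadata{labels: map[string]string{"env": "prod"}},
	}
	p, _ := jsonpointer.New("/meta/env")
	v, _, err := p.Get(doc)
	fmt.Println(v, err) // prod <nil>
}
```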
202
vendor/github.com/go-openapi/jsonreference/LICENSE
generated
vendored
@@ -1,202 +0,0 @@
                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "[]"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright [yyyy] [name of copyright owner]

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
15
vendor/github.com/go-openapi/jsonreference/README.md
generated
vendored
@@ -1,15 +0,0 @@
# gojsonreference

An implementation of JSON Reference - Go language

## Status
Work in progress ( 90% done )

## Dependencies
https://github.com/xeipuuv/gojsonpointer

## References
http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07

http://tools.ietf.org/html/draft-pbryan-zyp-json-ref-03
156
vendor/github.com/go-openapi/jsonreference/reference.go
generated
vendored
@@ -1,156 +0,0 @@
// Copyright 2013 sigu-399 ( https://github.com/sigu-399 )
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// author       sigu-399
// author-github  https://github.com/sigu-399
// author-mail    sigu.399@gmail.com
//
// repository-name  jsonreference
// repository-desc  An implementation of JSON Reference - Go language
//
// description    Main and unique file.
//
// created        26-02-2013

package jsonreference

import (
	"errors"
	"net/url"
	"strings"

	"github.com/PuerkitoBio/purell"
	"github.com/go-openapi/jsonpointer"
)

const (
	fragmentRune = `#`
)

// New creates a new reference for the given string
func New(jsonReferenceString string) (Ref, error) {

	var r Ref
	err := r.parse(jsonReferenceString)
	return r, err

}

// MustCreateRef parses the ref string and panics when it's invalid.
// Use the New method for a version that returns an error
func MustCreateRef(ref string) Ref {
	r, err := New(ref)
	if err != nil {
		panic(err)
	}
	return r
}

// Ref represents a json reference object
type Ref struct {
	referenceURL     *url.URL
	referencePointer jsonpointer.Pointer

	HasFullURL      bool
	HasURLPathOnly  bool
	HasFragmentOnly bool
	HasFileScheme   bool
	HasFullFilePath bool
}

// GetURL gets the URL for this reference
func (r *Ref) GetURL() *url.URL {
	return r.referenceURL
}

// GetPointer gets the json pointer for this reference
func (r *Ref) GetPointer() *jsonpointer.Pointer {
	return &r.referencePointer
}

// String returns the best version of the url for this reference
func (r *Ref) String() string {

	if r.referenceURL != nil {
		return r.referenceURL.String()
	}

	if r.HasFragmentOnly {
		return fragmentRune + r.referencePointer.String()
	}

	return r.referencePointer.String()
}

// IsRoot returns true if this reference is a root document
func (r *Ref) IsRoot() bool {
	return r.referenceURL != nil &&
		!r.IsCanonical() &&
		!r.HasURLPathOnly &&
		r.referenceURL.Fragment == ""
}

// IsCanonical returns true when this pointer starts with http(s):// or file://
func (r *Ref) IsCanonical() bool {
	return (r.HasFileScheme && r.HasFullFilePath) || (!r.HasFileScheme && r.HasFullURL)
}

// "Constructor", parses the given string JSON reference
func (r *Ref) parse(jsonReferenceString string) error {

	parsed, err := url.Parse(jsonReferenceString)
	if err != nil {
		return err
	}

	r.referenceURL, _ = url.Parse(purell.NormalizeURL(parsed, purell.FlagsSafe|purell.FlagRemoveDuplicateSlashes))
	refURL := r.referenceURL

	if refURL.Scheme != "" && refURL.Host != "" {
		r.HasFullURL = true
	} else {
		if refURL.Path != "" {
			r.HasURLPathOnly = true
		} else if refURL.RawQuery == "" && refURL.Fragment != "" {
			r.HasFragmentOnly = true
		}
	}

	r.HasFileScheme = refURL.Scheme == "file"
	r.HasFullFilePath = strings.HasPrefix(refURL.Path, "/")

	// invalid json-pointer error means url has no json-pointer fragment. simply ignore error
	r.referencePointer, _ = jsonpointer.New(refURL.Fragment)

	return nil
}

// Inherits creates a new reference from a parent and a child
// If the child cannot inherit from the parent, an error is returned
func (r *Ref) Inherits(child Ref) (*Ref, error) {
	childURL := child.GetURL()
	parentURL := r.GetURL()
	if childURL == nil {
		return nil, errors.New("child url is nil")
	}
	if parentURL == nil {
		return &child, nil
	}

	ref, err := New(parentURL.ResolveReference(childURL).String())
	if err != nil {
		return nil, err
	}
	return &ref, nil
}
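A small sketch of how the removed jsonreference package resolves a child reference against a parent; the URLs are made up for illustration:

```go
package main

import (
	"fmt"

	"github.com/go-openapi/jsonreference"
)

func main() {
	// Hypothetical parent document URL and a relative child $ref.
	parent, _ := jsonreference.New("http://example.com/schemas/base.json")
	child, _ := jsonreference.New("other.json#/definitions/record")

	// Inherits applies RFC 3986 resolution of the child against the parent.
	resolved, err := parent.Inherits(child)
	if err != nil {
		panic(err)
	}
	fmt.Println(resolved.String())
	// http://example.com/schemas/other.json#/definitions/record
	fmt.Println(resolved.GetPointer().DecodedTokens()) // [definitions record]
}
```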
202
vendor/github.com/go-openapi/spec/LICENSE
generated
vendored
@@ -1,202 +0,0 @@
                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "[]"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright [yyyy] [name of copyright owner]

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
5
vendor/github.com/go-openapi/spec/README.md
generated
vendored
@@ -1,5 +0,0 @@
# OAI object model

The object model for OpenAPI specification documents
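The object model's main entry point for reference resolution is the expander shown further down in this diff. A hedged usage sketch, assuming the package's Swagger root type (defined elsewhere in the package, not in this diff) and a hypothetical raw document:

```go
package main

import (
	"encoding/json"
	"log"

	"github.com/go-openapi/spec"
)

func main() {
	// raw is a hypothetical swagger 2.0 document; in practice it would be
	// read from disk or fetched over HTTP.
	raw := []byte(`{"swagger": "2.0", "info": {"title": "t", "version": "1"}, "paths": {}}`)

	doc := new(spec.Swagger)
	if err := json.Unmarshal(raw, doc); err != nil {
		log.Fatal(err)
	}

	// ExpandSpec (see expander.go below) walks definitions, parameters,
	// responses, and paths, resolving $ref pointers in place.
	if err := spec.ExpandSpec(doc); err != nil {
		log.Fatal(err)
	}
}
```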
274
vendor/github.com/go-openapi/spec/bindata.go
generated
vendored
File diff suppressed because one or more lines are too long
24
vendor/github.com/go-openapi/spec/contact_info.go
generated
vendored
@@ -1,24 +0,0 @@
// Copyright 2015 go-swagger maintainers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package spec

// ContactInfo contact information for the exposed API.
//
// For more information: http://goo.gl/8us55a#contactObject
type ContactInfo struct {
	Name  string `json:"name,omitempty"`
	URL   string `json:"url,omitempty"`
	Email string `json:"email,omitempty"`
}
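Since ContactInfo is a plain tagged struct, it round-trips through encoding/json directly; a tiny illustration with invented values:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	info := spec.ContactInfo{Name: "API Support", Email: "support@example.com"}
	b, _ := json.Marshal(info)
	fmt.Println(string(b)) // {"name":"API Support","email":"support@example.com"}
}
```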
626
vendor/github.com/go-openapi/spec/expander.go
generated
vendored
@@ -1,626 +0,0 @@
// Copyright 2015 go-swagger maintainers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package spec

import (
	"encoding/json"
	"fmt"
	"net/url"
	"reflect"
	"strings"
	"sync"

	"github.com/go-openapi/jsonpointer"
	"github.com/go-openapi/swag"
)

// ResolutionCache a cache for resolving urls
type ResolutionCache interface {
	Get(string) (interface{}, bool)
	Set(string, interface{})
}

type simpleCache struct {
	lock  sync.Mutex
	store map[string]interface{}
}

var resCache = initResolutionCache()

func initResolutionCache() ResolutionCache {
	return &simpleCache{store: map[string]interface{}{
		"http://swagger.io/v2/schema.json":       MustLoadSwagger20Schema(),
		"http://json-schema.org/draft-04/schema": MustLoadJSONSchemaDraft04(),
	}}
}

func (s *simpleCache) Get(uri string) (interface{}, bool) {
	s.lock.Lock()
	v, ok := s.store[uri]
	s.lock.Unlock()
	return v, ok
}

func (s *simpleCache) Set(uri string, data interface{}) {
	s.lock.Lock()
	s.store[uri] = data
	s.lock.Unlock()
}

// ResolveRef resolves a reference against a context root
func ResolveRef(root interface{}, ref *Ref) (*Schema, error) {
	resolver, err := defaultSchemaLoader(root, nil, nil)
	if err != nil {
		return nil, err
	}

	result := new(Schema)
	if err := resolver.Resolve(ref, result); err != nil {
		return nil, err
	}
	return result, nil
}

// ResolveParameter resolves a parameter reference against a context root
func ResolveParameter(root interface{}, ref Ref) (*Parameter, error) {
	resolver, err := defaultSchemaLoader(root, nil, nil)
	if err != nil {
		return nil, err
	}

	result := new(Parameter)
	if err := resolver.Resolve(&ref, result); err != nil {
		return nil, err
	}
	return result, nil
}

// ResolveResponse resolves a response reference against a context root
func ResolveResponse(root interface{}, ref Ref) (*Response, error) {
	resolver, err := defaultSchemaLoader(root, nil, nil)
	if err != nil {
		return nil, err
	}

	result := new(Response)
	if err := resolver.Resolve(&ref, result); err != nil {
		return nil, err
	}
	return result, nil
}

type schemaLoader struct {
	loadingRef  *Ref
	startingRef *Ref
	currentRef  *Ref
	root        interface{}
	cache       ResolutionCache
	loadDoc     func(string) (json.RawMessage, error)
}

var idPtr, _ = jsonpointer.New("/id")
var schemaPtr, _ = jsonpointer.New("/$schema")
var refPtr, _ = jsonpointer.New("/$ref")

func defaultSchemaLoader(root interface{}, ref *Ref, cache ResolutionCache) (*schemaLoader, error) {
	if cache == nil {
		cache = resCache
	}

	var ptr *jsonpointer.Pointer
	if ref != nil {
		ptr = ref.GetPointer()
	}

	currentRef := nextRef(root, ref, ptr)

	return &schemaLoader{
		root:        root,
		loadingRef:  ref,
		startingRef: ref,
		cache:       cache,
		loadDoc: func(path string) (json.RawMessage, error) {
			data, err := swag.LoadFromFileOrHTTP(path)
			if err != nil {
				return nil, err
			}
			return json.RawMessage(data), nil
		},
		currentRef: currentRef,
	}, nil
}

func idFromNode(node interface{}) (*Ref, error) {
	if idValue, _, err := idPtr.Get(node); err == nil {
		if refStr, ok := idValue.(string); ok && refStr != "" {
			idRef, err := NewRef(refStr)
			if err != nil {
				return nil, err
			}
			return &idRef, nil
		}
	}
	return nil, nil
}

func nextRef(startingNode interface{}, startingRef *Ref, ptr *jsonpointer.Pointer) *Ref {
	if startingRef == nil {
		return nil
	}
	if ptr == nil {
		return startingRef
	}

	ret := startingRef
	var idRef *Ref
	node := startingNode

	for _, tok := range ptr.DecodedTokens() {
		node, _, _ = jsonpointer.GetForToken(node, tok)
		if node == nil {
			break
		}

		idRef, _ = idFromNode(node)
		if idRef != nil {
			nw, err := ret.Inherits(*idRef)
			if err != nil {
				break
			}
			ret = nw
		}

		refRef, _, _ := refPtr.Get(node)
		if refRef != nil {
			rf, _ := NewRef(refRef.(string))
			nw, err := ret.Inherits(rf)
			if err != nil {
				break
			}
			ret = nw
		}

	}
	return ret
}

func (r *schemaLoader) resolveRef(currentRef, ref *Ref, node, target interface{}) error {
	tgt := reflect.ValueOf(target)
	if tgt.Kind() != reflect.Ptr {
		return fmt.Errorf("resolve ref: target needs to be a pointer")
	}

	oldRef := currentRef
	if currentRef != nil {
		var err error
		currentRef, err = currentRef.Inherits(*nextRef(node, ref, currentRef.GetPointer()))
		if err != nil {
			return err
		}
	}
	if currentRef == nil {
		currentRef = ref
	}

	refURL := currentRef.GetURL()
	if refURL == nil {
		return nil
	}
	if currentRef.IsRoot() {
		nv := reflect.ValueOf(node)
		reflect.Indirect(tgt).Set(reflect.Indirect(nv))
		return nil
	}

	if strings.HasPrefix(refURL.String(), "#") {
		res, _, err := ref.GetPointer().Get(node)
		if err != nil {
			res, _, err = ref.GetPointer().Get(r.root)
			if err != nil {
				return err
			}
		}
		rv := reflect.Indirect(reflect.ValueOf(res))
		tgtType := reflect.Indirect(tgt).Type()
		if rv.Type().AssignableTo(tgtType) {
			reflect.Indirect(tgt).Set(reflect.Indirect(reflect.ValueOf(res)))
		} else {
			if err := swag.DynamicJSONToStruct(rv.Interface(), target); err != nil {
				return err
			}
		}

		return nil
	}

	if refURL.Scheme != "" && refURL.Host != "" {
		// most definitely take the red pill
		data, _, _, err := r.load(refURL)
		if err != nil {
			return err
		}

		if ((oldRef == nil && currentRef != nil) ||
			(oldRef != nil && currentRef == nil) ||
			oldRef.String() != currentRef.String()) &&
			((oldRef == nil && ref != nil) ||
				(oldRef != nil && ref == nil) ||
				(oldRef.String() != ref.String())) {

			return r.resolveRef(currentRef, ref, data, target)
		}

		var res interface{}
		if currentRef.String() != "" {
			res, _, err = currentRef.GetPointer().Get(data)
			if err != nil {
				return err
			}
		} else {
			res = data
		}

		if err := swag.DynamicJSONToStruct(res, target); err != nil {
			return err
		}

	}
	return nil
}

func (r *schemaLoader) load(refURL *url.URL) (interface{}, url.URL, bool, error) {
	toFetch := *refURL
	toFetch.Fragment = ""

	data, fromCache := r.cache.Get(toFetch.String())
	if !fromCache {
		b, err := r.loadDoc(toFetch.String())
		if err != nil {
			return nil, url.URL{}, false, err
		}

		if err := json.Unmarshal(b, &data); err != nil {
			return nil, url.URL{}, false, err
		}
		r.cache.Set(toFetch.String(), data)
	}

	return data, toFetch, fromCache, nil
}

func (r *schemaLoader) Resolve(ref *Ref, target interface{}) error {
	if err := r.resolveRef(r.currentRef, ref, r.root, target); err != nil {
		return err
	}

	return nil
}

type specExpander struct {
	spec     *Swagger
	resolver *schemaLoader
}

// ExpandSpec expands the references in a swagger spec
func ExpandSpec(spec *Swagger) error {
	resolver, err := defaultSchemaLoader(spec, nil, nil)
	if err != nil {
		return err
	}

	for key, definition := range spec.Definitions {
		var def *Schema
		var err error
		if def, err = expandSchema(definition, []string{"#/definitions/" + key}, resolver); err != nil {
			return err
		}
		spec.Definitions[key] = *def
	}

	for key, parameter := range spec.Parameters {
		if err := expandParameter(&parameter, resolver); err != nil {
			return err
		}
		spec.Parameters[key] = parameter
	}

	for key, response := range spec.Responses {
		if err := expandResponse(&response, resolver); err != nil {
			return err
		}
		spec.Responses[key] = response
	}

	if spec.Paths != nil {
		for key, path := range spec.Paths.Paths {
			if err := expandPathItem(&path, resolver); err != nil {
				return err
			}
			spec.Paths.Paths[key] = path
		}
	}

	return nil
}

// ExpandSchema expands the refs in the schema object
func ExpandSchema(schema *Schema, root interface{}, cache ResolutionCache) error {

	if schema == nil {
		return nil
	}
	if root == nil {
		root = schema
	}

	nrr, _ := NewRef(schema.ID)
	var rrr *Ref
	if nrr.String() != "" {
		switch root.(type) {
		case *Schema:
			rid, _ := NewRef(root.(*Schema).ID)
			rrr, _ = rid.Inherits(nrr)
		case *Swagger:
			rid, _ := NewRef(root.(*Swagger).ID)
			rrr, _ = rid.Inherits(nrr)
		}

	}

	resolver, err := defaultSchemaLoader(root, rrr, cache)
	if err != nil {
		return err
	}

	refs := []string{""}
	if rrr != nil {
		refs[0] = rrr.String()
	}
	var s *Schema
	if s, err = expandSchema(*schema, refs, resolver); err != nil {
		return nil
	}
	*schema = *s
	return nil
}

func expandItems(target Schema, parentRefs []string, resolver *schemaLoader) (*Schema, error) {
	if target.Items != nil {
		if target.Items.Schema != nil {
			t, err := expandSchema(*target.Items.Schema, parentRefs, resolver)
			if err != nil {
				return nil, err
			}
			*target.Items.Schema = *t
		}
		for i := range target.Items.Schemas {
			t, err := expandSchema(target.Items.Schemas[i], parentRefs, resolver)
			if err != nil {
				return nil, err
			}
			target.Items.Schemas[i] = *t
		}
	}
	return &target, nil
}

func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader) (schema *Schema, err error) {
	defer func() {
		schema = &target
	}()
	if target.Ref.String() == "" && target.Ref.IsRoot() {
		target = *resolver.root.(*Schema)
		return
	}

	// t is the new expanded schema
	var t *Schema
	for target.Ref.String() != "" {
		// var newTarget Schema
		pRefs := strings.Join(parentRefs, ",")
		pRefs += ","
		if strings.Contains(pRefs, target.Ref.String()+",") {
			err = nil
			return
		}

		if err = resolver.Resolve(&target.Ref, &t); err != nil {
			return
		}
		parentRefs = append(parentRefs, target.Ref.String())
		target = *t
	}

	if t, err = expandItems(target, parentRefs, resolver); err != nil {
		return
	}
	target = *t

	for i := range target.AllOf {
		if t, err = expandSchema(target.AllOf[i], parentRefs, resolver); err != nil {
			return
		}
		target.AllOf[i] = *t
	}
	for i := range target.AnyOf {
		if t, err = expandSchema(target.AnyOf[i], parentRefs, resolver); err != nil {
			return
		}
		target.AnyOf[i] = *t
	}
	for i := range target.OneOf {
		if t, err = expandSchema(target.OneOf[i], parentRefs, resolver); err != nil {
			return
		}
		target.OneOf[i] = *t
	}
	if target.Not != nil {
		if t, err = expandSchema(*target.Not, parentRefs, resolver); err != nil {
			return
		}
		*target.Not = *t
	}
	for k := range target.Properties {
		if t, err = expandSchema(target.Properties[k], parentRefs, resolver); err != nil {
			return
		}
		target.Properties[k] = *t
	}
	if target.AdditionalProperties != nil && target.AdditionalProperties.Schema != nil {
		if t, err = expandSchema(*target.AdditionalProperties.Schema, parentRefs, resolver); err != nil {
			return
		}
		*target.AdditionalProperties.Schema = *t
	}
	for k := range target.PatternProperties {
		if t, err = expandSchema(target.PatternProperties[k], parentRefs, resolver); err != nil {
			return
		}
		target.PatternProperties[k] = *t
	}
	for k := range target.Dependencies {
		if target.Dependencies[k].Schema != nil {
			if t, err = expandSchema(*target.Dependencies[k].Schema, parentRefs, resolver); err != nil {
				return
			}
			*target.Dependencies[k].Schema = *t
		}
	}
	if target.AdditionalItems != nil && target.AdditionalItems.Schema != nil {
		if t, err = expandSchema(*target.AdditionalItems.Schema, parentRefs, resolver); err != nil {
			return
		}
		*target.AdditionalItems.Schema = *t
	}
	for k := range target.Definitions {
		if t, err = expandSchema(target.Definitions[k], parentRefs, resolver); err != nil {
			return
		}
		target.Definitions[k] = *t
	}
	return
}

func expandPathItem(pathItem *PathItem, resolver *schemaLoader) error {
	if pathItem == nil {
		return nil
	}
	if pathItem.Ref.String() != "" {
		if err := resolver.Resolve(&pathItem.Ref, &pathItem); err != nil {
			return err
		}
	}

	for idx := range pathItem.Parameters {
		if err := expandParameter(&(pathItem.Parameters[idx]), resolver); err != nil {
			return err
		}
	}
	if err := expandOperation(pathItem.Get, resolver); err != nil {
		return err
	}
	if err := expandOperation(pathItem.Head, resolver); err != nil {
		return err
	}
	if err := expandOperation(pathItem.Options, resolver); err != nil {
		return err
	}
	if err := expandOperation(pathItem.Put, resolver); err != nil {
		return err
	}
	if err := expandOperation(pathItem.Post, resolver); err != nil {
		return err
	}
	if err := expandOperation(pathItem.Patch, resolver); err != nil {
		return err
	}
	if err := expandOperation(pathItem.Delete, resolver); err != nil {
		return err
	}
	return nil
}

func expandOperation(op *Operation, resolver *schemaLoader) error {
	if op == nil {
		return nil
	}
	for i, param := range op.Parameters {
		if err := expandParameter(&param, resolver); err != nil {
			return err
		}
		op.Parameters[i] = param
	}

	if op.Responses != nil {
		responses := op.Responses
		if err := expandResponse(responses.Default, resolver); err != nil {
			return err
		}
		for code, response := range responses.StatusCodeResponses {
			if err := expandResponse(&response, resolver); err != nil {
				return err
			}
			responses.StatusCodeResponses[code] = response
		}
	}
	return nil
}

func expandResponse(response *Response, resolver *schemaLoader) error {
	if response == nil {
		return nil
	}

	if response.Ref.String() != "" {
		if err := resolver.Resolve(&response.Ref, response); err != nil {
			return err
		}
	}

	if response.Schema != nil {
		parentRefs := []string{response.Schema.Ref.String()}
		if err := resolver.Resolve(&response.Schema.Ref, &response.Schema); err != nil {
			return err
|
|
||||||
}
|
|
||||||
if s, err := expandSchema(*response.Schema, parentRefs, resolver); err != nil {
|
|
||||||
return err
|
|
||||||
} else {
|
|
||||||
*response.Schema = *s
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func expandParameter(parameter *Parameter, resolver *schemaLoader) error {
|
|
||||||
if parameter == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
if parameter.Ref.String() != "" {
|
|
||||||
if err := resolver.Resolve(¶meter.Ref, parameter); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if parameter.Schema != nil {
|
|
||||||
parentRefs := []string{parameter.Schema.Ref.String()}
|
|
||||||
if err := resolver.Resolve(¶meter.Schema.Ref, ¶meter.Schema); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if s, err := expandSchema(*parameter.Schema, parentRefs, resolver); err != nil {
|
|
||||||
return err
|
|
||||||
} else {
|
|
||||||
*parameter.Schema = *s
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
24 vendor/github.com/go-openapi/spec/external_docs.go generated vendored
@ -1,24 +0,0 @@
// Copyright 2015 go-swagger maintainers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package spec

// ExternalDocumentation allows referencing an external resource for
// extended documentation.
//
// For more information: http://goo.gl/8us55a#externalDocumentationObject
type ExternalDocumentation struct {
	Description string `json:"description,omitempty"`
	URL         string `json:"url,omitempty"`
}
165 vendor/github.com/go-openapi/spec/header.go generated vendored
@ -1,165 +0,0 @@
// Copyright 2015 go-swagger maintainers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package spec

import (
	"encoding/json"

	"github.com/go-openapi/swag"
)

// HeaderProps describes a response header
type HeaderProps struct {
	Description string `json:"description,omitempty"`
}

// Header describes a header for a response of the API
//
// For more information: http://goo.gl/8us55a#headerObject
type Header struct {
	CommonValidations
	SimpleSchema
	HeaderProps
}

// ResponseHeader creates a new header instance for use in a response
func ResponseHeader() *Header {
	return new(Header)
}

// WithDescription sets the description on this response, allows for chaining
func (h *Header) WithDescription(description string) *Header {
	h.Description = description
	return h
}

// Typed a fluent builder method for the type of parameter
func (h *Header) Typed(tpe, format string) *Header {
	h.Type = tpe
	h.Format = format
	return h
}

// CollectionOf a fluent builder method for an array item
func (h *Header) CollectionOf(items *Items, format string) *Header {
	h.Type = "array"
	h.Items = items
	h.CollectionFormat = format
	return h
}

// WithDefault sets the default value on this item
func (h *Header) WithDefault(defaultValue interface{}) *Header {
	h.Default = defaultValue
	return h
}

// WithMaxLength sets a max length value
func (h *Header) WithMaxLength(max int64) *Header {
	h.MaxLength = &max
	return h
}

// WithMinLength sets a min length value
func (h *Header) WithMinLength(min int64) *Header {
	h.MinLength = &min
	return h
}

// WithPattern sets a pattern value
func (h *Header) WithPattern(pattern string) *Header {
	h.Pattern = pattern
	return h
}

// WithMultipleOf sets a multiple of value
func (h *Header) WithMultipleOf(number float64) *Header {
	h.MultipleOf = &number
	return h
}

// WithMaximum sets a maximum number value
func (h *Header) WithMaximum(max float64, exclusive bool) *Header {
	h.Maximum = &max
	h.ExclusiveMaximum = exclusive
	return h
}

// WithMinimum sets a minimum number value
func (h *Header) WithMinimum(min float64, exclusive bool) *Header {
	h.Minimum = &min
	h.ExclusiveMinimum = exclusive
	return h
}

// WithEnum sets the enum values (replace)
func (h *Header) WithEnum(values ...interface{}) *Header {
	h.Enum = append([]interface{}{}, values...)
	return h
}

// WithMaxItems sets the max items
func (h *Header) WithMaxItems(size int64) *Header {
	h.MaxItems = &size
	return h
}

// WithMinItems sets the min items
func (h *Header) WithMinItems(size int64) *Header {
	h.MinItems = &size
	return h
}

// UniqueValues dictates that this array can only have unique items
func (h *Header) UniqueValues() *Header {
	h.UniqueItems = true
	return h
}

// AllowDuplicates this array can have duplicates
func (h *Header) AllowDuplicates() *Header {
	h.UniqueItems = false
	return h
}

// MarshalJSON marshals this header to JSON
func (h Header) MarshalJSON() ([]byte, error) {
	b1, err := json.Marshal(h.CommonValidations)
	if err != nil {
		return nil, err
	}
	b2, err := json.Marshal(h.SimpleSchema)
	if err != nil {
		return nil, err
	}
	b3, err := json.Marshal(h.HeaderProps)
	if err != nil {
		return nil, err
	}
	return swag.ConcatJSON(b1, b2, b3), nil
}

// UnmarshalJSON unmarshals this header from JSON
func (h *Header) UnmarshalJSON(data []byte) error {
	if err := json.Unmarshal(data, &h.CommonValidations); err != nil {
		return err
	}
	if err := json.Unmarshal(data, &h.SimpleSchema); err != nil {
		return err
	}
	if err := json.Unmarshal(data, &h.HeaderProps); err != nil {
		return err
	}
	return nil
}
168 vendor/github.com/go-openapi/spec/info.go generated vendored
@ -1,168 +0,0 @@
// Copyright 2015 go-swagger maintainers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package spec

import (
	"encoding/json"
	"strings"

	"github.com/go-openapi/jsonpointer"
	"github.com/go-openapi/swag"
)

// Extensions vendor specific extensions
type Extensions map[string]interface{}

// Add adds a value to these extensions
func (e Extensions) Add(key string, value interface{}) {
	realKey := strings.ToLower(key)
	e[realKey] = value
}

// GetString gets a string value from the extensions
func (e Extensions) GetString(key string) (string, bool) {
	if v, ok := e[strings.ToLower(key)]; ok {
		str, ok := v.(string)
		return str, ok
	}
	return "", false
}

// GetBool gets a bool value from the extensions
func (e Extensions) GetBool(key string) (bool, bool) {
	if v, ok := e[strings.ToLower(key)]; ok {
		str, ok := v.(bool)
		return str, ok
	}
	return false, false
}

// GetStringSlice gets a string slice value from the extensions
func (e Extensions) GetStringSlice(key string) ([]string, bool) {
	if v, ok := e[strings.ToLower(key)]; ok {
		arr, ok := v.([]interface{})
		if !ok {
			return nil, false
		}
		var strs []string
		for _, iface := range arr {
			str, ok := iface.(string)
			if !ok {
				return nil, false
			}
			strs = append(strs, str)
		}
		return strs, ok
	}
	return nil, false
}

// VendorExtensible composition block.
type VendorExtensible struct {
	Extensions Extensions
}

// AddExtension adds an extension to this extensible object
func (v *VendorExtensible) AddExtension(key string, value interface{}) {
	if value == nil {
		return
	}
	if v.Extensions == nil {
		v.Extensions = make(map[string]interface{})
	}
	v.Extensions.Add(key, value)
}

// MarshalJSON marshals the extensions to json
func (v VendorExtensible) MarshalJSON() ([]byte, error) {
	toser := make(map[string]interface{})
	for k, v := range v.Extensions {
		lk := strings.ToLower(k)
		if strings.HasPrefix(lk, "x-") {
			toser[k] = v
		}
	}
	return json.Marshal(toser)
}

// UnmarshalJSON for this extensible object
func (v *VendorExtensible) UnmarshalJSON(data []byte) error {
	var d map[string]interface{}
	if err := json.Unmarshal(data, &d); err != nil {
		return err
	}
	for k, vv := range d {
		lk := strings.ToLower(k)
		if strings.HasPrefix(lk, "x-") {
			if v.Extensions == nil {
				v.Extensions = map[string]interface{}{}
			}
			v.Extensions[k] = vv
		}
	}
	return nil
}

// InfoProps the properties for an info definition
type InfoProps struct {
	Description    string       `json:"description,omitempty"`
	Title          string       `json:"title,omitempty"`
	TermsOfService string       `json:"termsOfService,omitempty"`
	Contact        *ContactInfo `json:"contact,omitempty"`
	License        *License     `json:"license,omitempty"`
	Version        string       `json:"version,omitempty"`
}

// Info object provides metadata about the API.
// The metadata can be used by the clients if needed, and can be presented in the Swagger-UI for convenience.
//
// For more information: http://goo.gl/8us55a#infoObject
type Info struct {
	VendorExtensible
	InfoProps
}

// JSONLookup look up a value by the json property name
func (i Info) JSONLookup(token string) (interface{}, error) {
	if ex, ok := i.Extensions[token]; ok {
		return &ex, nil
	}
	r, _, err := jsonpointer.GetForToken(i.InfoProps, token)
	return r, err
}

// MarshalJSON marshals this info object to JSON
func (i Info) MarshalJSON() ([]byte, error) {
	b1, err := json.Marshal(i.InfoProps)
	if err != nil {
		return nil, err
	}
	b2, err := json.Marshal(i.VendorExtensible)
	if err != nil {
		return nil, err
	}
	return swag.ConcatJSON(b1, b2), nil
}

// UnmarshalJSON unmarshals this info object from JSON
func (i *Info) UnmarshalJSON(data []byte) error {
	if err := json.Unmarshal(data, &i.InfoProps); err != nil {
		return err
	}
	if err := json.Unmarshal(data, &i.VendorExtensible); err != nil {
		return err
	}
	return nil
}
199 vendor/github.com/go-openapi/spec/items.go generated vendored
@ -1,199 +0,0 @@
// Copyright 2015 go-swagger maintainers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package spec

import (
	"encoding/json"

	"github.com/go-openapi/swag"
)

// SimpleSchema describes a simple schema for a parameter or header
type SimpleSchema struct {
	Type             string      `json:"type,omitempty"`
	Format           string      `json:"format,omitempty"`
	Items            *Items      `json:"items,omitempty"`
	CollectionFormat string      `json:"collectionFormat,omitempty"`
	Default          interface{} `json:"default,omitempty"`
}

// TypeName returns the type (or format) name of this simple schema
func (s *SimpleSchema) TypeName() string {
	if s.Format != "" {
		return s.Format
	}
	return s.Type
}

// ItemsTypeName returns the type name of the items of this simple schema, when it is an array
func (s *SimpleSchema) ItemsTypeName() string {
	if s.Items == nil {
		return ""
	}
	return s.Items.TypeName()
}

// CommonValidations describe common JSON-schema validations
type CommonValidations struct {
	Maximum          *float64      `json:"maximum,omitempty"`
	ExclusiveMaximum bool          `json:"exclusiveMaximum,omitempty"`
	Minimum          *float64      `json:"minimum,omitempty"`
	ExclusiveMinimum bool          `json:"exclusiveMinimum,omitempty"`
	MaxLength        *int64        `json:"maxLength,omitempty"`
	MinLength        *int64        `json:"minLength,omitempty"`
	Pattern          string        `json:"pattern,omitempty"`
	MaxItems         *int64        `json:"maxItems,omitempty"`
	MinItems         *int64        `json:"minItems,omitempty"`
	UniqueItems      bool          `json:"uniqueItems,omitempty"`
	MultipleOf       *float64      `json:"multipleOf,omitempty"`
	Enum             []interface{} `json:"enum,omitempty"`
}

// Items a limited subset of JSON-Schema's items object.
// It is used by parameter definitions that are not located in "body".
//
// For more information: http://goo.gl/8us55a#items-object-
type Items struct {
	Refable
	CommonValidations
	SimpleSchema
}

// NewItems creates a new instance of items
func NewItems() *Items {
	return &Items{}
}

// Typed a fluent builder method for the type of item
func (i *Items) Typed(tpe, format string) *Items {
	i.Type = tpe
	i.Format = format
	return i
}

// CollectionOf a fluent builder method for an array item
func (i *Items) CollectionOf(items *Items, format string) *Items {
	i.Type = "array"
	i.Items = items
	i.CollectionFormat = format
	return i
}

// WithDefault sets the default value on this item
func (i *Items) WithDefault(defaultValue interface{}) *Items {
	i.Default = defaultValue
	return i
}

// WithMaxLength sets a max length value
func (i *Items) WithMaxLength(max int64) *Items {
	i.MaxLength = &max
	return i
}

// WithMinLength sets a min length value
func (i *Items) WithMinLength(min int64) *Items {
	i.MinLength = &min
	return i
}

// WithPattern sets a pattern value
func (i *Items) WithPattern(pattern string) *Items {
	i.Pattern = pattern
	return i
}

// WithMultipleOf sets a multiple of value
func (i *Items) WithMultipleOf(number float64) *Items {
	i.MultipleOf = &number
	return i
}

// WithMaximum sets a maximum number value
func (i *Items) WithMaximum(max float64, exclusive bool) *Items {
	i.Maximum = &max
	i.ExclusiveMaximum = exclusive
	return i
}

// WithMinimum sets a minimum number value
func (i *Items) WithMinimum(min float64, exclusive bool) *Items {
	i.Minimum = &min
	i.ExclusiveMinimum = exclusive
	return i
}

// WithEnum sets the enum values (replace)
func (i *Items) WithEnum(values ...interface{}) *Items {
	i.Enum = append([]interface{}{}, values...)
	return i
}

// WithMaxItems sets the max items
func (i *Items) WithMaxItems(size int64) *Items {
	i.MaxItems = &size
	return i
}

// WithMinItems sets the min items
func (i *Items) WithMinItems(size int64) *Items {
	i.MinItems = &size
	return i
}

// UniqueValues dictates that this array can only have unique items
func (i *Items) UniqueValues() *Items {
	i.UniqueItems = true
	return i
}

// AllowDuplicates this array can have duplicates
func (i *Items) AllowDuplicates() *Items {
	i.UniqueItems = false
	return i
}

// UnmarshalJSON hydrates this items instance with the data from JSON
func (i *Items) UnmarshalJSON(data []byte) error {
	var validations CommonValidations
	if err := json.Unmarshal(data, &validations); err != nil {
		return err
	}
	var ref Refable
	if err := json.Unmarshal(data, &ref); err != nil {
		return err
	}
	var simpleSchema SimpleSchema
	if err := json.Unmarshal(data, &simpleSchema); err != nil {
		return err
	}
	i.Refable = ref
	i.CommonValidations = validations
	i.SimpleSchema = simpleSchema
	return nil
}

// MarshalJSON converts this items object to JSON
func (i Items) MarshalJSON() ([]byte, error) {
	b1, err := json.Marshal(i.CommonValidations)
	if err != nil {
		return nil, err
	}
	b2, err := json.Marshal(i.SimpleSchema)
	if err != nil {
		return nil, err
	}
	b3, err := json.Marshal(i.Refable)
	if err != nil {
		return nil, err
	}
	return swag.ConcatJSON(b3, b1, b2), nil
}
23 vendor/github.com/go-openapi/spec/license.go generated vendored
@ -1,23 +0,0 @@
// Copyright 2015 go-swagger maintainers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package spec

// License information for the exposed API.
//
// For more information: http://goo.gl/8us55a#licenseObject
type License struct {
	Name string `json:"name,omitempty"`
	URL  string `json:"url,omitempty"`
}
233 vendor/github.com/go-openapi/spec/operation.go generated vendored
@ -1,233 +0,0 @@
// Copyright 2015 go-swagger maintainers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package spec

import (
	"encoding/json"

	"github.com/go-openapi/jsonpointer"
	"github.com/go-openapi/swag"
)

// OperationProps describes an operation
type OperationProps struct {
	Description  string                 `json:"description,omitempty"`
	Consumes     []string               `json:"consumes,omitempty"`
	Produces     []string               `json:"produces,omitempty"`
	Schemes      []string               `json:"schemes,omitempty"` // the scheme, when present must be from [http, https, ws, wss]
	Tags         []string               `json:"tags,omitempty"`
	Summary      string                 `json:"summary,omitempty"`
	ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"`
	ID           string                 `json:"operationId,omitempty"`
	Deprecated   bool                   `json:"deprecated,omitempty"`
	Security     []map[string][]string  `json:"security,omitempty"`
	Parameters   []Parameter            `json:"parameters,omitempty"`
	Responses    *Responses             `json:"responses,omitempty"`
}

// Operation describes a single API operation on a path.
//
// For more information: http://goo.gl/8us55a#operationObject
type Operation struct {
	VendorExtensible
	OperationProps
}

// SuccessResponse gets a success response model
func (o *Operation) SuccessResponse() (*Response, int, bool) {
	if o.Responses == nil {
		return nil, 0, false
	}

	for k, v := range o.Responses.StatusCodeResponses {
		if k/100 == 2 {
			return &v, k, true
		}
	}

	return o.Responses.Default, 0, false
}

// JSONLookup look up a value by the json property name
func (o Operation) JSONLookup(token string) (interface{}, error) {
	if ex, ok := o.Extensions[token]; ok {
		return &ex, nil
	}
	r, _, err := jsonpointer.GetForToken(o.OperationProps, token)
	return r, err
}

// UnmarshalJSON hydrates this operation instance with the data from JSON
func (o *Operation) UnmarshalJSON(data []byte) error {
	if err := json.Unmarshal(data, &o.OperationProps); err != nil {
		return err
	}
	if err := json.Unmarshal(data, &o.VendorExtensible); err != nil {
		return err
	}
	return nil
}

// MarshalJSON converts this operation object to JSON
func (o Operation) MarshalJSON() ([]byte, error) {
	b1, err := json.Marshal(o.OperationProps)
	if err != nil {
		return nil, err
	}
	b2, err := json.Marshal(o.VendorExtensible)
	if err != nil {
		return nil, err
	}
	concated := swag.ConcatJSON(b1, b2)
	return concated, nil
}

// NewOperation creates a new operation instance.
// It expects an ID as parameter but not passing an ID is also valid.
func NewOperation(id string) *Operation {
	op := new(Operation)
	op.ID = id
	return op
}

// WithID sets the ID property on this operation, allows for chaining.
func (o *Operation) WithID(id string) *Operation {
	o.ID = id
	return o
}

// WithDescription sets the description on this operation, allows for chaining
func (o *Operation) WithDescription(description string) *Operation {
	o.Description = description
	return o
}

// WithSummary sets the summary on this operation, allows for chaining
func (o *Operation) WithSummary(summary string) *Operation {
	o.Summary = summary
	return o
}

// WithExternalDocs sets/removes the external docs for/from this operation.
// When you pass empty strings as params the external documents will be removed.
// When you pass non-empty string as one value then those values will be used on the external docs object.
// So when you pass a non-empty description, you should also pass the url and vice versa.
func (o *Operation) WithExternalDocs(description, url string) *Operation {
	if description == "" && url == "" {
		o.ExternalDocs = nil
		return o
	}

	if o.ExternalDocs == nil {
		o.ExternalDocs = &ExternalDocumentation{}
	}
	o.ExternalDocs.Description = description
	o.ExternalDocs.URL = url
	return o
}

// Deprecate marks the operation as deprecated
func (o *Operation) Deprecate() *Operation {
	o.Deprecated = true
	return o
}

// Undeprecate marks the operation as not deprecated
func (o *Operation) Undeprecate() *Operation {
	o.Deprecated = false
	return o
}

// WithConsumes adds media types for incoming body values
func (o *Operation) WithConsumes(mediaTypes ...string) *Operation {
	o.Consumes = append(o.Consumes, mediaTypes...)
	return o
}

// WithProduces adds media types for outgoing body values
func (o *Operation) WithProduces(mediaTypes ...string) *Operation {
	o.Produces = append(o.Produces, mediaTypes...)
	return o
}

// WithTags adds tags for this operation
func (o *Operation) WithTags(tags ...string) *Operation {
	o.Tags = append(o.Tags, tags...)
	return o
}

// AddParam adds a parameter to this operation, when a parameter for that location
// and with that name already exists it will be replaced
func (o *Operation) AddParam(param *Parameter) *Operation {
	if param == nil {
		return o
	}

	for i, p := range o.Parameters {
		if p.Name == param.Name && p.In == param.In {
			params := append(o.Parameters[:i], *param)
			params = append(params, o.Parameters[i+1:]...)
			o.Parameters = params
			return o
		}
	}

	o.Parameters = append(o.Parameters, *param)
	return o
}

// RemoveParam removes a parameter from the operation
func (o *Operation) RemoveParam(name, in string) *Operation {
	for i, p := range o.Parameters {
		if p.Name == name && p.In == in {
			o.Parameters = append(o.Parameters[:i], o.Parameters[i+1:]...)
			return o
		}
	}
	return o
}

// SecuredWith adds a security scope to this operation.
func (o *Operation) SecuredWith(name string, scopes ...string) *Operation {
	o.Security = append(o.Security, map[string][]string{name: scopes})
	return o
}

// WithDefaultResponse adds a default response to the operation.
// Passing a nil value will remove the response
func (o *Operation) WithDefaultResponse(response *Response) *Operation {
	return o.RespondsWith(0, response)
}

// RespondsWith adds a status code response to the operation.
// When the code is 0 the value of the response will be used as default response value.
// When the value of the response is nil it will be removed from the operation
func (o *Operation) RespondsWith(code int, response *Response) *Operation {
	if o.Responses == nil {
		o.Responses = new(Responses)
	}
	if code == 0 {
		o.Responses.Default = response
		return o
	}
	if response == nil {
		delete(o.Responses.StatusCodeResponses, code)
		return o
	}
	if o.Responses.StatusCodeResponses == nil {
		o.Responses.StatusCodeResponses = make(map[int]Response)
	}
	o.Responses.StatusCodeResponses[code] = *response
	return o
}
299 vendor/github.com/go-openapi/spec/parameter.go generated vendored
@ -1,299 +0,0 @@
// Copyright 2015 go-swagger maintainers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package spec

import (
	"encoding/json"

	"github.com/go-openapi/jsonpointer"
	"github.com/go-openapi/swag"
)

// QueryParam creates a query parameter
func QueryParam(name string) *Parameter {
	return &Parameter{ParamProps: ParamProps{Name: name, In: "query"}}
}

// HeaderParam creates a header parameter, this is always required by default
func HeaderParam(name string) *Parameter {
	return &Parameter{ParamProps: ParamProps{Name: name, In: "header", Required: true}}
}

// PathParam creates a path parameter, this is always required
func PathParam(name string) *Parameter {
	return &Parameter{ParamProps: ParamProps{Name: name, In: "path", Required: true}}
}

// BodyParam creates a body parameter
func BodyParam(name string, schema *Schema) *Parameter {
	return &Parameter{ParamProps: ParamProps{Name: name, In: "body", Schema: schema}, SimpleSchema: SimpleSchema{Type: "object"}}
}

// FormDataParam creates a form data parameter
func FormDataParam(name string) *Parameter {
	return &Parameter{ParamProps: ParamProps{Name: name, In: "formData"}}
}

// FileParam creates a file parameter
func FileParam(name string) *Parameter {
	return &Parameter{ParamProps: ParamProps{Name: name, In: "formData"}, SimpleSchema: SimpleSchema{Type: "file"}}
}

// SimpleArrayParam creates a param for a simple array (string, int, date etc)
func SimpleArrayParam(name, tpe, fmt string) *Parameter {
	return &Parameter{ParamProps: ParamProps{Name: name}, SimpleSchema: SimpleSchema{Type: "array", CollectionFormat: "csv", Items: &Items{SimpleSchema: SimpleSchema{Type: "string", Format: fmt}}}}
}

// ParamRef creates a parameter that's a json reference
func ParamRef(uri string) *Parameter {
	p := new(Parameter)
	p.Ref = MustCreateRef(uri)
	return p
}

// ParamProps describes the specific attributes of an operation parameter
type ParamProps struct {
	Description     string  `json:"description,omitempty"`
	Name            string  `json:"name,omitempty"`
	In              string  `json:"in,omitempty"`
	Required        bool    `json:"required,omitempty"`
	Schema          *Schema `json:"schema,omitempty"`          // when in == "body"
	AllowEmptyValue bool    `json:"allowEmptyValue,omitempty"` // when in == "query" || "formData"
}

// Parameter a unique parameter is defined by a combination of a [name](#parameterName) and [location](#parameterIn).
//
// There are five possible parameter types.
// * Path - Used together with [Path Templating](#pathTemplating), where the parameter value is actually part of the operation's URL. This does not include the host or base path of the API. For example, in `/items/{itemId}`, the path parameter is `itemId`.
// * Query - Parameters that are appended to the URL. For example, in `/items?id=###`, the query parameter is `id`.
// * Header - Custom headers that are expected as part of the request.
// * Body - The payload that's appended to the HTTP request. Since there can only be one payload, there can only be *one* body parameter. The name of the body parameter has no effect on the parameter itself and is used for documentation purposes only. Since Form parameters are also in the payload, body and form parameters cannot exist together for the same operation.
// * Form - Used to describe the payload of an HTTP request when either `application/x-www-form-urlencoded` or `multipart/form-data` are used as the content type of the request (in Swagger's definition, the [`consumes`](#operationConsumes) property of an operation). This is the only parameter type that can be used to send files, thus supporting the `file` type. Since form parameters are sent in the payload, they cannot be declared together with a body parameter for the same operation. Form parameters have a different format based on the content-type used (for further details, consult http://www.w3.org/TR/html401/interact/forms.html#h-17.13.4):
//    * `application/x-www-form-urlencoded` - Similar to the format of Query parameters but as a payload. For example, `foo=1&bar=swagger` - both `foo` and `bar` are form parameters. This is normally used for simple parameters that are being transferred.
//    * `multipart/form-data` - each parameter takes a section in the payload with an internal header. For example, for the header `Content-Disposition: form-data; name="submit-name"` the name of the parameter is `submit-name`. This type of form parameters is more commonly used for file transfers.
//
// For more information: http://goo.gl/8us55a#parameterObject
type Parameter struct {
	Refable
	CommonValidations
	SimpleSchema
	VendorExtensible
	ParamProps
}

// JSONLookup look up a value by the json property name
func (p Parameter) JSONLookup(token string) (interface{}, error) {
	if ex, ok := p.Extensions[token]; ok {
		return &ex, nil
	}
	if token == "$ref" {
		return &p.Ref, nil
	}
	r, _, err := jsonpointer.GetForToken(p.CommonValidations, token)
	if err != nil {
		return nil, err
	}
	if r != nil {
		return r, nil
	}
	r, _, err = jsonpointer.GetForToken(p.SimpleSchema, token)
	if err != nil {
		return nil, err
	}
	if r != nil {
		return r, nil
	}
	r, _, err = jsonpointer.GetForToken(p.ParamProps, token)
	return r, err
}

// WithDescription a fluent builder method for the description of the parameter
func (p *Parameter) WithDescription(description string) *Parameter {
	p.Description = description
	return p
}

// Named a fluent builder method to override the name of the parameter
func (p *Parameter) Named(name string) *Parameter {
	p.Name = name
	return p
}

// WithLocation a fluent builder method to override the location of the parameter
func (p *Parameter) WithLocation(in string) *Parameter {
	p.In = in
	return p
}

// Typed a fluent builder method for the type of the parameter value
func (p *Parameter) Typed(tpe, format string) *Parameter {
	p.Type = tpe
	p.Format = format
	return p
}

// CollectionOf a fluent builder method for an array parameter
func (p *Parameter) CollectionOf(items *Items, format string) *Parameter {
	p.Type = "array"
	p.Items = items
	p.CollectionFormat = format
	return p
}

// WithDefault sets the default value on this parameter
func (p *Parameter) WithDefault(defaultValue interface{}) *Parameter {
	p.AsOptional() // with default implies optional
	p.Default = defaultValue
	return p
}

// AllowsEmptyValues flags this parameter as being ok with empty values
func (p *Parameter) AllowsEmptyValues() *Parameter {
	p.AllowEmptyValue = true
	return p
}

// NoEmptyValues flags this parameter as not liking empty values
func (p *Parameter) NoEmptyValues() *Parameter {
	p.AllowEmptyValue = false
	return p
}

// AsOptional flags this parameter as optional
func (p *Parameter) AsOptional() *Parameter {
	p.Required = false
	return p
}

// AsRequired flags this parameter as required
func (p *Parameter) AsRequired() *Parameter {
	if p.Default != nil { // with a default required makes no sense
		return p
	}
	p.Required = true
	return p
}

// WithMaxLength sets a max length value
func (p *Parameter) WithMaxLength(max int64) *Parameter {
	p.MaxLength = &max
	return p
}

// WithMinLength sets a min length value
func (p *Parameter) WithMinLength(min int64) *Parameter {
	p.MinLength = &min
	return p
}

// WithPattern sets a pattern value
func (p *Parameter) WithPattern(pattern string) *Parameter {
	p.Pattern = pattern
	return p
}

// WithMultipleOf sets a multiple of value
func (p *Parameter) WithMultipleOf(number float64) *Parameter {
	p.MultipleOf = &number
	return p
}

// WithMaximum sets a maximum number value
func (p *Parameter) WithMaximum(max float64, exclusive bool) *Parameter {
	p.Maximum = &max
	p.ExclusiveMaximum = exclusive
	return p
}

// WithMinimum sets a minimum number value
func (p *Parameter) WithMinimum(min float64, exclusive bool) *Parameter {
	p.Minimum = &min
	p.ExclusiveMinimum = exclusive
	return p
}

// WithEnum sets the enum values (replace)
func (p *Parameter) WithEnum(values ...interface{}) *Parameter {
	p.Enum = append([]interface{}{}, values...)
	return p
}

// WithMaxItems sets the max items
func (p *Parameter) WithMaxItems(size int64) *Parameter {
	p.MaxItems = &size
	return p
}

// WithMinItems sets the min items
func (p *Parameter) WithMinItems(size int64) *Parameter {
	p.MinItems = &size
	return p
}

// UniqueValues dictates that this array can only have unique items
func (p *Parameter) UniqueValues() *Parameter {
	p.UniqueItems = true
	return p
}

// AllowDuplicates this array can have duplicates
func (p *Parameter) AllowDuplicates() *Parameter {
	p.UniqueItems = false
	return p
}

// UnmarshalJSON hydrates this parameter instance with the data from JSON
func (p *Parameter) UnmarshalJSON(data []byte) error {
	if err := json.Unmarshal(data, &p.CommonValidations); err != nil {
		return err
	}
	if err := json.Unmarshal(data, &p.Refable); err != nil {
		return err
	}
	if err := json.Unmarshal(data, &p.SimpleSchema); err != nil {
		return err
	}
	if err := json.Unmarshal(data, &p.VendorExtensible); err != nil {
		return err
	}
	if err := json.Unmarshal(data, &p.ParamProps); err != nil {
		return err
	}
	return nil
}

// MarshalJSON converts this parameter object to JSON
func (p Parameter) MarshalJSON() ([]byte, error) {
	b1, err := json.Marshal(p.CommonValidations)
	if err != nil {
		return nil, err
	}
	b2, err := json.Marshal(p.SimpleSchema)
	if err != nil {
		return nil, err
	}
	b3, err := json.Marshal(p.Refable)
	if err != nil {
		return nil, err
	}
	b4, err := json.Marshal(p.VendorExtensible)
	if err != nil {
		return nil, err
	}
	b5, err := json.Marshal(p.ParamProps)
	if err != nil {
		return nil, err
	}
	return swag.ConcatJSON(b3, b1, b2, b4, b5), nil
}
90 vendor/github.com/go-openapi/spec/path_item.go generated vendored
@ -1,90 +0,0 @@
// Copyright 2015 go-swagger maintainers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package spec

import (
	"encoding/json"

	"github.com/go-openapi/jsonpointer"
	"github.com/go-openapi/swag"
)

// PathItemProps the path item specific properties
type PathItemProps struct {
	Get        *Operation  `json:"get,omitempty"`
	Put        *Operation  `json:"put,omitempty"`
	Post       *Operation  `json:"post,omitempty"`
	Delete     *Operation  `json:"delete,omitempty"`
	Options    *Operation  `json:"options,omitempty"`
	Head       *Operation  `json:"head,omitempty"`
	Patch      *Operation  `json:"patch,omitempty"`
	Parameters []Parameter `json:"parameters,omitempty"`
}

// PathItem describes the operations available on a single path.
// A Path Item may be empty, due to [ACL constraints](http://goo.gl/8us55a#securityFiltering).
// The path itself is still exposed to the documentation viewer but they will
// not know which operations and parameters are available.
//
// For more information: http://goo.gl/8us55a#pathItemObject
type PathItem struct {
	Refable
	VendorExtensible
	PathItemProps
}

// JSONLookup look up a value by the json property name
func (p PathItem) JSONLookup(token string) (interface{}, error) {
	if ex, ok := p.Extensions[token]; ok {
		return &ex, nil
	}
	if token == "$ref" {
		return &p.Ref, nil
	}
	r, _, err := jsonpointer.GetForToken(p.PathItemProps, token)
	return r, err
}

// UnmarshalJSON hydrates this path item instance with the data from JSON
func (p *PathItem) UnmarshalJSON(data []byte) error {
	if err := json.Unmarshal(data, &p.Refable); err != nil {
		return err
	}
	if err := json.Unmarshal(data, &p.VendorExtensible); err != nil {
		return err
	}
	if err := json.Unmarshal(data, &p.PathItemProps); err != nil {
		return err
	}
	return nil
}

// MarshalJSON converts this path item object to JSON
func (p PathItem) MarshalJSON() ([]byte, error) {
	b3, err := json.Marshal(p.Refable)
	if err != nil {
		return nil, err
	}
	b4, err := json.Marshal(p.VendorExtensible)
	if err != nil {
		return nil, err
	}
	b5, err := json.Marshal(p.PathItemProps)
	if err != nil {
		return nil, err
	}
	concated := swag.ConcatJSON(b3, b4, b5)
	return concated, nil
}
97 vendor/github.com/go-openapi/spec/paths.go generated vendored
@ -1,97 +0,0 @@
// Copyright 2015 go-swagger maintainers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package spec

import (
	"encoding/json"
	"fmt"
	"strings"

	"github.com/go-openapi/swag"
)

// Paths holds the relative paths to the individual endpoints.
// The path is appended to the [`basePath`](http://goo.gl/8us55a#swaggerBasePath) in order
// to construct the full URL.
// The Paths may be empty, due to [ACL constraints](http://goo.gl/8us55a#securityFiltering).
//
// For more information: http://goo.gl/8us55a#pathsObject
type Paths struct {
	VendorExtensible
	Paths map[string]PathItem `json:"-"` // custom serializer to flatten this, each entry must start with "/"
}

// JSONLookup look up a value by the json property name
func (p Paths) JSONLookup(token string) (interface{}, error) {
	if pi, ok := p.Paths[token]; ok {
		return &pi, nil
	}
	if ex, ok := p.Extensions[token]; ok {
		return &ex, nil
	}
	return nil, fmt.Errorf("object has no field %q", token)
}

// UnmarshalJSON hydrates this paths instance with the data from JSON
func (p *Paths) UnmarshalJSON(data []byte) error {
	var res map[string]json.RawMessage
	if err := json.Unmarshal(data, &res); err != nil {
		return err
	}
	for k, v := range res {
		if strings.HasPrefix(strings.ToLower(k), "x-") {
			if p.Extensions == nil {
				p.Extensions = make(map[string]interface{})
			}
			var d interface{}
			if err := json.Unmarshal(v, &d); err != nil {
				return err
			}
			p.Extensions[k] = d
		}
		if strings.HasPrefix(k, "/") {
			if p.Paths == nil {
				p.Paths = make(map[string]PathItem)
			}
			var pi PathItem
			if err := json.Unmarshal(v, &pi); err != nil {
				return err
			}
			p.Paths[k] = pi
		}
	}
	return nil
}

// MarshalJSON converts this paths object to JSON
func (p Paths) MarshalJSON() ([]byte, error) {
	b1, err := json.Marshal(p.VendorExtensible)
	if err != nil {
		return nil, err
	}

	pths := make(map[string]PathItem)
	for k, v := range p.Paths {
		if strings.HasPrefix(k, "/") {
			pths[k] = v
		}
	}
	b2, err := json.Marshal(pths)
	if err != nil {
		return nil, err
	}
	concated := swag.ConcatJSON(b1, b2)
	return concated, nil
}
167 vendor/github.com/go-openapi/spec/ref.go generated vendored
@ -1,167 +0,0 @@
// Copyright 2015 go-swagger maintainers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package spec

import (
	"encoding/json"
	"net/http"
	"os"
	"path/filepath"

	"github.com/go-openapi/jsonreference"
)

// Refable is a struct for things that accept a $ref property
type Refable struct {
	Ref Ref
}

// MarshalJSON marshals the ref to json
func (r Refable) MarshalJSON() ([]byte, error) {
	return r.Ref.MarshalJSON()
}

// UnmarshalJSON unmarshals the ref from json
func (r *Refable) UnmarshalJSON(d []byte) error {
	return json.Unmarshal(d, &r.Ref)
}

// Ref represents a json reference that is potentially resolved
type Ref struct {
	jsonreference.Ref
}

// RemoteURI gets the remote uri part of the ref
func (r *Ref) RemoteURI() string {
	if r.String() == "" {
		return r.String()
	}

	u := *r.GetURL()
	u.Fragment = ""
	return u.String()
}

// IsValidURI returns true when the url the ref points to can be found
func (r *Ref) IsValidURI() bool {
	if r.String() == "" {
		return true
	}

	v := r.RemoteURI()
	if v == "" {
		return true
	}

	if r.HasFullURL {
		rr, err := http.Get(v)
		if err != nil {
			return false
		}

		return rr.StatusCode/100 == 2
	}

	if !(r.HasFileScheme || r.HasFullFilePath || r.HasURLPathOnly) {
		return false
	}

	// check for local file
	pth := v
	if r.HasURLPathOnly {
		p, e := filepath.Abs(pth)
		if e != nil {
			return false
		}
		pth = p
	}

	fi, err := os.Stat(pth)
	if err != nil {
		return false
	}

	return !fi.IsDir()
}

// Inherits creates a new reference from a parent and a child
// If the child cannot inherit from the parent, an error is returned
func (r *Ref) Inherits(child Ref) (*Ref, error) {
	ref, err := r.Ref.Inherits(child.Ref)
	if err != nil {
		return nil, err
	}
	return &Ref{Ref: *ref}, nil
}

// NewRef creates a new instance of a ref object
// returns an error when the reference uri is an invalid uri
func NewRef(refURI string) (Ref, error) {
	ref, err := jsonreference.New(refURI)
	if err != nil {
		return Ref{}, err
	}
	return Ref{Ref: ref}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// MustCreateRef creates a ref object but
|
|
||||||
func MustCreateRef(refURI string) Ref {
|
|
||||||
return Ref{Ref: jsonreference.MustCreateRef(refURI)}
|
|
||||||
}
|
|
||||||
|
|
||||||
// // NewResolvedRef creates a resolved ref
|
|
||||||
// func NewResolvedRef(refURI string, data interface{}) Ref {
|
|
||||||
// return Ref{
|
|
||||||
// Ref: jsonreference.MustCreateRef(refURI),
|
|
||||||
// Resolved: data,
|
|
||||||
// }
|
|
||||||
// }
|
|
||||||
|
|
||||||
// MarshalJSON marshals this ref into a JSON object
|
|
||||||
func (r Ref) MarshalJSON() ([]byte, error) {
|
|
||||||
str := r.String()
|
|
||||||
if str == "" {
|
|
||||||
if r.IsRoot() {
|
|
||||||
return []byte(`{"$ref":"#"}`), nil
|
|
||||||
}
|
|
||||||
return []byte("{}"), nil
|
|
||||||
}
|
|
||||||
v := map[string]interface{}{"$ref": str}
|
|
||||||
return json.Marshal(v)
|
|
||||||
}
|
|
||||||
|
|
||||||
// UnmarshalJSON unmarshals this ref from a JSON object
|
|
||||||
func (r *Ref) UnmarshalJSON(d []byte) error {
|
|
||||||
var v map[string]interface{}
|
|
||||||
if err := json.Unmarshal(d, &v); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if v == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if vv, ok := v["$ref"]; ok {
|
|
||||||
if str, ok := vv.(string); ok {
|
|
||||||
ref, err := jsonreference.New(str)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
*r = Ref{Ref: ref}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
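A quick sketch of the Ref round-trip defined above, assuming the same vendored import path; note that MustCreateRef panics on a malformed URI where NewRef would return an error.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	ref := spec.MustCreateRef("#/definitions/Pet")
	b, _ := json.Marshal(ref) // uses Ref.MarshalJSON
	fmt.Println(string(b))    // {"$ref":"#/definitions/Pet"}

	var r spec.Ref
	_ = json.Unmarshal(b, &r) // restores the reference
	fmt.Println(r.String())   // #/definitions/Pet
}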
113 vendor/github.com/go-openapi/spec/response.go generated vendored
@ -1,113 +0,0 @@
// Copyright 2015 go-swagger maintainers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package spec

import (
	"encoding/json"

	"github.com/go-openapi/swag"
)

// ResponseProps properties specific to a response
type ResponseProps struct {
	Description string                 `json:"description,omitempty"`
	Schema      *Schema                `json:"schema,omitempty"`
	Headers     map[string]Header      `json:"headers,omitempty"`
	Examples    map[string]interface{} `json:"examples,omitempty"`
}

// Response describes a single response from an API Operation.
//
// For more information: http://goo.gl/8us55a#responseObject
type Response struct {
	Refable
	ResponseProps
}

// UnmarshalJSON hydrates this items instance with the data from JSON
func (r *Response) UnmarshalJSON(data []byte) error {
	if err := json.Unmarshal(data, &r.ResponseProps); err != nil {
		return err
	}
	if err := json.Unmarshal(data, &r.Refable); err != nil {
		return err
	}
	return nil
}

// MarshalJSON converts this items object to JSON
func (r Response) MarshalJSON() ([]byte, error) {
	b1, err := json.Marshal(r.ResponseProps)
	if err != nil {
		return nil, err
	}
	b2, err := json.Marshal(r.Refable)
	if err != nil {
		return nil, err
	}
	return swag.ConcatJSON(b1, b2), nil
}

// NewResponse creates a new response instance
func NewResponse() *Response {
	return new(Response)
}

// ResponseRef creates a response as a json reference
func ResponseRef(url string) *Response {
	resp := NewResponse()
	resp.Ref = MustCreateRef(url)
	return resp
}

// WithDescription sets the description on this response, allows for chaining
func (r *Response) WithDescription(description string) *Response {
	r.Description = description
	return r
}

// WithSchema sets the schema on this response, allows for chaining.
// Passing a nil argument removes the schema from this response
func (r *Response) WithSchema(schema *Schema) *Response {
	r.Schema = schema
	return r
}

// AddHeader adds a header to this response
func (r *Response) AddHeader(name string, header *Header) *Response {
	if header == nil {
		return r.RemoveHeader(name)
	}
	if r.Headers == nil {
		r.Headers = make(map[string]Header)
	}
	r.Headers[name] = *header
	return r
}

// RemoveHeader removes a header from this response
func (r *Response) RemoveHeader(name string) *Response {
	delete(r.Headers, name)
	return r
}

// AddExample adds an example to this response
func (r *Response) AddExample(mediaType string, example interface{}) *Response {
	if r.Examples == nil {
		r.Examples = make(map[string]interface{})
	}
	r.Examples[mediaType] = example
	return r
}
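The builder methods above are designed to chain; a minimal sketch under the same vendored import path (RefSchema comes from schema.go, later in this diff, and "#/definitions/Pet" is a hypothetical target):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	resp := spec.NewResponse().
		WithDescription("pet response").
		WithSchema(spec.RefSchema("#/definitions/Pet")).
		AddExample("application/json", map[string]interface{}{"name": "rex"})
	b, _ := json.Marshal(resp)
	fmt.Println(string(b)) // description, schema $ref and examples, merged by swag.ConcatJSON
}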
122 vendor/github.com/go-openapi/spec/responses.go generated vendored
@ -1,122 +0,0 @@
// Copyright 2015 go-swagger maintainers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package spec

import (
	"encoding/json"
	"fmt"
	"reflect"
	"strconv"

	"github.com/go-openapi/swag"
)

// Responses is a container for the expected responses of an operation.
// The container maps a HTTP response code to the expected response.
// It is not expected from the documentation to necessarily cover all possible HTTP response codes,
// since they may not be known in advance. However, it is expected from the documentation to cover
// a successful operation response and any known errors.
//
// The `default` can be used a default response object for all HTTP codes that are not covered
// individually by the specification.
//
// The `Responses Object` MUST contain at least one response code, and it SHOULD be the response
// for a successful operation call.
//
// For more information: http://goo.gl/8us55a#responsesObject
type Responses struct {
	VendorExtensible
	ResponsesProps
}

// JSONLookup implements an interface to customize json pointer lookup
func (r Responses) JSONLookup(token string) (interface{}, error) {
	if token == "default" {
		return r.Default, nil
	}
	if ex, ok := r.Extensions[token]; ok {
		return &ex, nil
	}
	if i, err := strconv.Atoi(token); err == nil {
		if scr, ok := r.StatusCodeResponses[i]; ok {
			return &scr, nil
		}
	}
	return nil, fmt.Errorf("object has no field %q", token)
}

// UnmarshalJSON hydrates this items instance with the data from JSON
func (r *Responses) UnmarshalJSON(data []byte) error {
	if err := json.Unmarshal(data, &r.ResponsesProps); err != nil {
		return err
	}
	if err := json.Unmarshal(data, &r.VendorExtensible); err != nil {
		return err
	}
	if reflect.DeepEqual(ResponsesProps{}, r.ResponsesProps) {
		r.ResponsesProps = ResponsesProps{}
	}
	return nil
}

// MarshalJSON converts this items object to JSON
func (r Responses) MarshalJSON() ([]byte, error) {
	b1, err := json.Marshal(r.ResponsesProps)
	if err != nil {
		return nil, err
	}
	b2, err := json.Marshal(r.VendorExtensible)
	if err != nil {
		return nil, err
	}
	concated := swag.ConcatJSON(b1, b2)
	return concated, nil
}

type ResponsesProps struct {
	Default             *Response
	StatusCodeResponses map[int]Response
}

func (r ResponsesProps) MarshalJSON() ([]byte, error) {
	toser := map[string]Response{}
	if r.Default != nil {
		toser["default"] = *r.Default
	}
	for k, v := range r.StatusCodeResponses {
		toser[strconv.Itoa(k)] = v
	}
	return json.Marshal(toser)
}

func (r *ResponsesProps) UnmarshalJSON(data []byte) error {
	var res map[string]Response
	if err := json.Unmarshal(data, &res); err != nil {
		return nil
	}
	if v, ok := res["default"]; ok {
		r.Default = &v
		delete(res, "default")
	}
	for k, v := range res {
		if nk, err := strconv.Atoi(k); err == nil {
			if r.StatusCodeResponses == nil {
				r.StatusCodeResponses = map[int]Response{}
			}
			r.StatusCodeResponses[nk] = v
		}
	}
	return nil
}
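A minimal sketch of the status-code flattening above (same vendored import path; "#/responses/Err" is a hypothetical reference target):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	rs := spec.Responses{ResponsesProps: spec.ResponsesProps{
		Default: spec.ResponseRef("#/responses/Err"),
		StatusCodeResponses: map[int]spec.Response{
			200: *spec.NewResponse().WithDescription("ok"),
		},
	}}
	b, _ := json.Marshal(rs) // int keys become JSON strings, plus the "default" entry
	fmt.Println(string(b))   // e.g. {"200":{"description":"ok"},"default":{"$ref":"#/responses/Err"}}
}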
628 vendor/github.com/go-openapi/spec/schema.go generated vendored
@ -1,628 +0,0 @@
// Copyright 2015 go-swagger maintainers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package spec

import (
	"encoding/json"
	"fmt"
	"net/url"
	"strings"

	"github.com/go-openapi/jsonpointer"
	"github.com/go-openapi/swag"
)

// BooleanProperty creates a boolean property
func BooleanProperty() *Schema {
	return &Schema{SchemaProps: SchemaProps{Type: []string{"boolean"}}}
}

// BoolProperty creates a boolean property
func BoolProperty() *Schema { return BooleanProperty() }

// StringProperty creates a string property
func StringProperty() *Schema {
	return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}}}
}

// CharProperty creates a string property
func CharProperty() *Schema {
	return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}}}
}

// Float64Property creates a float64/double property
func Float64Property() *Schema {
	return &Schema{SchemaProps: SchemaProps{Type: []string{"number"}, Format: "double"}}
}

// Float32Property creates a float32/float property
func Float32Property() *Schema {
	return &Schema{SchemaProps: SchemaProps{Type: []string{"number"}, Format: "float"}}
}

// Int8Property creates an int8 property
func Int8Property() *Schema {
	return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int8"}}
}

// Int16Property creates an int16 property
func Int16Property() *Schema {
	return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int16"}}
}

// Int32Property creates an int32 property
func Int32Property() *Schema {
	return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int32"}}
}

// Int64Property creates an int64 property
func Int64Property() *Schema {
	return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int64"}}
}

// StrFmtProperty creates a property for the named string format
func StrFmtProperty(format string) *Schema {
	return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}, Format: format}}
}

// DateProperty creates a date property
func DateProperty() *Schema {
	return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}, Format: "date"}}
}

// DateTimeProperty creates a date time property
func DateTimeProperty() *Schema {
	return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}, Format: "date-time"}}
}

// MapProperty creates a map property
func MapProperty(property *Schema) *Schema {
	return &Schema{SchemaProps: SchemaProps{Type: []string{"object"}, AdditionalProperties: &SchemaOrBool{Allows: true, Schema: property}}}
}

// RefProperty creates a ref property
func RefProperty(name string) *Schema {
	return &Schema{SchemaProps: SchemaProps{Ref: MustCreateRef(name)}}
}

// RefSchema creates a ref property
func RefSchema(name string) *Schema {
	return &Schema{SchemaProps: SchemaProps{Ref: MustCreateRef(name)}}
}

// ArrayProperty creates an array property
func ArrayProperty(items *Schema) *Schema {
	if items == nil {
		return &Schema{SchemaProps: SchemaProps{Type: []string{"array"}}}
	}
	return &Schema{SchemaProps: SchemaProps{Items: &SchemaOrArray{Schema: items}, Type: []string{"array"}}}
}

// ComposedSchema creates a schema with allOf
func ComposedSchema(schemas ...Schema) *Schema {
	s := new(Schema)
	s.AllOf = schemas
	return s
}

// SchemaURL represents a schema url
type SchemaURL string

// MarshalJSON marshal this to JSON
func (r SchemaURL) MarshalJSON() ([]byte, error) {
	if r == "" {
		return []byte("{}"), nil
	}
	v := map[string]interface{}{"$schema": string(r)}
	return json.Marshal(v)
}

// UnmarshalJSON unmarshal this from JSON
func (r *SchemaURL) UnmarshalJSON(data []byte) error {
	var v map[string]interface{}
	if err := json.Unmarshal(data, &v); err != nil {
		return err
	}
	if v == nil {
		return nil
	}
	if vv, ok := v["$schema"]; ok {
		if str, ok := vv.(string); ok {
			u, err := url.Parse(str)
			if err != nil {
				return err
			}

			*r = SchemaURL(u.String())
		}
	}
	return nil
}

// type ExtraSchemaProps map[string]interface{}

// // JSONSchema represents a structure that is a json schema draft 04
// type JSONSchema struct {
// 	SchemaProps
// 	ExtraSchemaProps
// }

// // MarshalJSON marshal this to JSON
// func (s JSONSchema) MarshalJSON() ([]byte, error) {
// 	b1, err := json.Marshal(s.SchemaProps)
// 	if err != nil {
// 		return nil, err
// 	}
// 	b2, err := s.Ref.MarshalJSON()
// 	if err != nil {
// 		return nil, err
// 	}
// 	b3, err := s.Schema.MarshalJSON()
// 	if err != nil {
// 		return nil, err
// 	}
// 	b4, err := json.Marshal(s.ExtraSchemaProps)
// 	if err != nil {
// 		return nil, err
// 	}
// 	return swag.ConcatJSON(b1, b2, b3, b4), nil
// }

// // UnmarshalJSON marshal this from JSON
// func (s *JSONSchema) UnmarshalJSON(data []byte) error {
// 	var sch JSONSchema
// 	if err := json.Unmarshal(data, &sch.SchemaProps); err != nil {
// 		return err
// 	}
// 	if err := json.Unmarshal(data, &sch.Ref); err != nil {
// 		return err
// 	}
// 	if err := json.Unmarshal(data, &sch.Schema); err != nil {
// 		return err
// 	}
// 	if err := json.Unmarshal(data, &sch.ExtraSchemaProps); err != nil {
// 		return err
// 	}
// 	*s = sch
// 	return nil
// }

type SchemaProps struct {
	ID                   string            `json:"id,omitempty"`
	Ref                  Ref               `json:"-,omitempty"`
	Schema               SchemaURL         `json:"-,omitempty"`
	Description          string            `json:"description,omitempty"`
	Type                 StringOrArray     `json:"type,omitempty"`
	Format               string            `json:"format,omitempty"`
	Title                string            `json:"title,omitempty"`
	Default              interface{}       `json:"default,omitempty"`
	Maximum              *float64          `json:"maximum,omitempty"`
	ExclusiveMaximum     bool              `json:"exclusiveMaximum,omitempty"`
	Minimum              *float64          `json:"minimum,omitempty"`
	ExclusiveMinimum     bool              `json:"exclusiveMinimum,omitempty"`
	MaxLength            *int64            `json:"maxLength,omitempty"`
	MinLength            *int64            `json:"minLength,omitempty"`
	Pattern              string            `json:"pattern,omitempty"`
	MaxItems             *int64            `json:"maxItems,omitempty"`
	MinItems             *int64            `json:"minItems,omitempty"`
	UniqueItems          bool              `json:"uniqueItems,omitempty"`
	MultipleOf           *float64          `json:"multipleOf,omitempty"`
	Enum                 []interface{}     `json:"enum,omitempty"`
	MaxProperties        *int64            `json:"maxProperties,omitempty"`
	MinProperties        *int64            `json:"minProperties,omitempty"`
	Required             []string          `json:"required,omitempty"`
	Items                *SchemaOrArray    `json:"items,omitempty"`
	AllOf                []Schema          `json:"allOf,omitempty"`
	OneOf                []Schema          `json:"oneOf,omitempty"`
	AnyOf                []Schema          `json:"anyOf,omitempty"`
	Not                  *Schema           `json:"not,omitempty"`
	Properties           map[string]Schema `json:"properties,omitempty"`
	AdditionalProperties *SchemaOrBool     `json:"additionalProperties,omitempty"`
	PatternProperties    map[string]Schema `json:"patternProperties,omitempty"`
	Dependencies         Dependencies      `json:"dependencies,omitempty"`
	AdditionalItems      *SchemaOrBool     `json:"additionalItems,omitempty"`
	Definitions          Definitions       `json:"definitions,omitempty"`
}

type SwaggerSchemaProps struct {
	Discriminator string                 `json:"discriminator,omitempty"`
	ReadOnly      bool                   `json:"readOnly,omitempty"`
	XML           *XMLObject             `json:"xml,omitempty"`
	ExternalDocs  *ExternalDocumentation `json:"externalDocs,omitempty"`
	Example       interface{}            `json:"example,omitempty"`
}

// Schema the schema object allows the definition of input and output data types.
// These types can be objects, but also primitives and arrays.
// This object is based on the [JSON Schema Specification Draft 4](http://json-schema.org/)
// and uses a predefined subset of it.
// On top of this subset, there are extensions provided by this specification to allow for more complete documentation.
//
// For more information: http://goo.gl/8us55a#schemaObject
type Schema struct {
	VendorExtensible
	SchemaProps
	SwaggerSchemaProps
	ExtraProps map[string]interface{} `json:"-"`
}

// JSONLookup implements an interface to customize json pointer lookup
func (s Schema) JSONLookup(token string) (interface{}, error) {
	if ex, ok := s.Extensions[token]; ok {
		return &ex, nil
	}

	if ex, ok := s.ExtraProps[token]; ok {
		return &ex, nil
	}

	r, _, err := jsonpointer.GetForToken(s.SchemaProps, token)
	if r != nil || err != nil {
		return r, err
	}
	r, _, err = jsonpointer.GetForToken(s.SwaggerSchemaProps, token)
	return r, err
}

// WithID sets the id for this schema, allows for chaining
func (s *Schema) WithID(id string) *Schema {
	s.ID = id
	return s
}

// WithTitle sets the title for this schema, allows for chaining
func (s *Schema) WithTitle(title string) *Schema {
	s.Title = title
	return s
}

// WithDescription sets the description for this schema, allows for chaining
func (s *Schema) WithDescription(description string) *Schema {
	s.Description = description
	return s
}

// WithProperties sets the properties for this schema
func (s *Schema) WithProperties(schemas map[string]Schema) *Schema {
	s.Properties = schemas
	return s
}

// SetProperty sets a property on this schema
func (s *Schema) SetProperty(name string, schema Schema) *Schema {
	if s.Properties == nil {
		s.Properties = make(map[string]Schema)
	}
	s.Properties[name] = schema
	return s
}

// WithAllOf sets the all of property
func (s *Schema) WithAllOf(schemas ...Schema) *Schema {
	s.AllOf = schemas
	return s
}

// WithMaxProperties sets the max number of properties an object can have
func (s *Schema) WithMaxProperties(max int64) *Schema {
	s.MaxProperties = &max
	return s
}

// WithMinProperties sets the min number of properties an object must have
func (s *Schema) WithMinProperties(min int64) *Schema {
	s.MinProperties = &min
	return s
}

// Typed sets the type of this schema for a single value item
func (s *Schema) Typed(tpe, format string) *Schema {
	s.Type = []string{tpe}
	s.Format = format
	return s
}

// AddType adds a type with potential format to the types for this schema
func (s *Schema) AddType(tpe, format string) *Schema {
	s.Type = append(s.Type, tpe)
	if format != "" {
		s.Format = format
	}
	return s
}

// CollectionOf a fluent builder method for an array parameter
func (s *Schema) CollectionOf(items Schema) *Schema {
	s.Type = []string{"array"}
	s.Items = &SchemaOrArray{Schema: &items}
	return s
}

// WithDefault sets the default value on this parameter
func (s *Schema) WithDefault(defaultValue interface{}) *Schema {
	s.Default = defaultValue
	return s
}

// WithRequired flags this parameter as required
func (s *Schema) WithRequired(items ...string) *Schema {
	s.Required = items
	return s
}

// AddRequired adds field names to the required properties array
func (s *Schema) AddRequired(items ...string) *Schema {
	s.Required = append(s.Required, items...)
	return s
}

// WithMaxLength sets a max length value
func (s *Schema) WithMaxLength(max int64) *Schema {
	s.MaxLength = &max
	return s
}

// WithMinLength sets a min length value
func (s *Schema) WithMinLength(min int64) *Schema {
	s.MinLength = &min
	return s
}

// WithPattern sets a pattern value
func (s *Schema) WithPattern(pattern string) *Schema {
	s.Pattern = pattern
	return s
}

// WithMultipleOf sets a multiple of value
func (s *Schema) WithMultipleOf(number float64) *Schema {
	s.MultipleOf = &number
	return s
}

// WithMaximum sets a maximum number value
func (s *Schema) WithMaximum(max float64, exclusive bool) *Schema {
	s.Maximum = &max
	s.ExclusiveMaximum = exclusive
	return s
}

// WithMinimum sets a minimum number value
func (s *Schema) WithMinimum(min float64, exclusive bool) *Schema {
	s.Minimum = &min
	s.ExclusiveMinimum = exclusive
	return s
}

// WithEnum sets a the enum values (replace)
func (s *Schema) WithEnum(values ...interface{}) *Schema {
	s.Enum = append([]interface{}{}, values...)
	return s
}

// WithMaxItems sets the max items
func (s *Schema) WithMaxItems(size int64) *Schema {
	s.MaxItems = &size
	return s
}

// WithMinItems sets the min items
func (s *Schema) WithMinItems(size int64) *Schema {
	s.MinItems = &size
	return s
}

// UniqueValues dictates that this array can only have unique items
func (s *Schema) UniqueValues() *Schema {
	s.UniqueItems = true
	return s
}

// AllowDuplicates this array can have duplicates
func (s *Schema) AllowDuplicates() *Schema {
	s.UniqueItems = false
	return s
}

// AddToAllOf adds a schema to the allOf property
func (s *Schema) AddToAllOf(schemas ...Schema) *Schema {
	s.AllOf = append(s.AllOf, schemas...)
	return s
}

// WithDiscriminator sets the name of the discriminator field
func (s *Schema) WithDiscriminator(discriminator string) *Schema {
	s.Discriminator = discriminator
	return s
}

// AsReadOnly flags this schema as readonly
func (s *Schema) AsReadOnly() *Schema {
	s.ReadOnly = true
	return s
}

// AsWritable flags this schema as writeable (not read-only)
func (s *Schema) AsWritable() *Schema {
	s.ReadOnly = false
	return s
}

// WithExample sets the example for this schema
func (s *Schema) WithExample(example interface{}) *Schema {
	s.Example = example
	return s
}

// WithExternalDocs sets/removes the external docs for/from this schema.
// When you pass empty strings as params the external documents will be removed.
// When you pass non-empty string as one value then those values will be used on the external docs object.
// So when you pass a non-empty description, you should also pass the url and vice versa.
func (s *Schema) WithExternalDocs(description, url string) *Schema {
	if description == "" && url == "" {
		s.ExternalDocs = nil
		return s
	}

	if s.ExternalDocs == nil {
		s.ExternalDocs = &ExternalDocumentation{}
	}
	s.ExternalDocs.Description = description
	s.ExternalDocs.URL = url
	return s
}

// WithXMLName sets the xml name for the object
func (s *Schema) WithXMLName(name string) *Schema {
	if s.XML == nil {
		s.XML = new(XMLObject)
	}
	s.XML.Name = name
	return s
}

// WithXMLNamespace sets the xml namespace for the object
func (s *Schema) WithXMLNamespace(namespace string) *Schema {
	if s.XML == nil {
		s.XML = new(XMLObject)
	}
	s.XML.Namespace = namespace
	return s
}

// WithXMLPrefix sets the xml prefix for the object
func (s *Schema) WithXMLPrefix(prefix string) *Schema {
	if s.XML == nil {
		s.XML = new(XMLObject)
	}
	s.XML.Prefix = prefix
	return s
}

// AsXMLAttribute flags this object as xml attribute
func (s *Schema) AsXMLAttribute() *Schema {
	if s.XML == nil {
		s.XML = new(XMLObject)
	}
	s.XML.Attribute = true
	return s
}

// AsXMLElement flags this object as an xml node
func (s *Schema) AsXMLElement() *Schema {
	if s.XML == nil {
		s.XML = new(XMLObject)
	}
	s.XML.Attribute = false
	return s
}

// AsWrappedXML flags this object as wrapped, this is mostly useful for array types
func (s *Schema) AsWrappedXML() *Schema {
	if s.XML == nil {
		s.XML = new(XMLObject)
	}
	s.XML.Wrapped = true
	return s
}

// AsUnwrappedXML flags this object as an xml node
func (s *Schema) AsUnwrappedXML() *Schema {
	if s.XML == nil {
		s.XML = new(XMLObject)
	}
	s.XML.Wrapped = false
	return s
}

// MarshalJSON marshal this to JSON
func (s Schema) MarshalJSON() ([]byte, error) {
	b1, err := json.Marshal(s.SchemaProps)
	if err != nil {
		return nil, fmt.Errorf("schema props %v", err)
	}
	b2, err := json.Marshal(s.VendorExtensible)
	if err != nil {
		return nil, fmt.Errorf("vendor props %v", err)
	}
	b3, err := s.Ref.MarshalJSON()
	if err != nil {
		return nil, fmt.Errorf("ref prop %v", err)
	}
	b4, err := s.Schema.MarshalJSON()
	if err != nil {
		return nil, fmt.Errorf("schema prop %v", err)
	}
	b5, err := json.Marshal(s.SwaggerSchemaProps)
	if err != nil {
		return nil, fmt.Errorf("common validations %v", err)
	}
	var b6 []byte
	if s.ExtraProps != nil {
		jj, err := json.Marshal(s.ExtraProps)
		if err != nil {
			return nil, fmt.Errorf("extra props %v", err)
		}
		b6 = jj
	}
	return swag.ConcatJSON(b1, b2, b3, b4, b5, b6), nil
}

// UnmarshalJSON marshal this from JSON
func (s *Schema) UnmarshalJSON(data []byte) error {
	var sch Schema
	if err := json.Unmarshal(data, &sch.SchemaProps); err != nil {
		return err
	}
	if err := json.Unmarshal(data, &sch.Ref); err != nil {
		return err
	}
	if err := json.Unmarshal(data, &sch.Schema); err != nil {
		return err
	}
	if err := json.Unmarshal(data, &sch.SwaggerSchemaProps); err != nil {
		return err
	}

	var d map[string]interface{}
	if err := json.Unmarshal(data, &d); err != nil {
		return err
	}

	delete(d, "$ref")
	delete(d, "$schema")
	for _, pn := range swag.DefaultJSONNameProvider.GetJSONNames(s) {
		delete(d, pn)
	}

	for k, vv := range d {
		lk := strings.ToLower(k)
		if strings.HasPrefix(lk, "x-") {
			if sch.Extensions == nil {
				sch.Extensions = map[string]interface{}{}
			}
			sch.Extensions[k] = vv
			continue
		}
		if sch.ExtraProps == nil {
			sch.ExtraProps = map[string]interface{}{}
		}
		sch.ExtraProps[k] = vv
	}

	*s = sch

	return nil
}
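A minimal sketch of the fluent builders above (same vendored import path); the exact marshaled key order is not guaranteed.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	obj := new(spec.Schema)
	obj.Typed("object", "").
		SetProperty("name", *spec.StringProperty().WithMinLength(1)).
		SetProperty("age", *spec.Int32Property()).
		WithRequired("name")
	b, _ := json.Marshal(obj)
	fmt.Println(string(b)) // e.g. {"type":"object","required":["name"],"properties":{...}}
}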
142 vendor/github.com/go-openapi/spec/security_scheme.go generated vendored
@ -1,142 +0,0 @@
// Copyright 2015 go-swagger maintainers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package spec

import (
	"encoding/json"

	"github.com/go-openapi/jsonpointer"
	"github.com/go-openapi/swag"
)

const (
	basic       = "basic"
	apiKey      = "apiKey"
	oauth2      = "oauth2"
	implicit    = "implicit"
	password    = "password"
	application = "application"
	accessCode  = "accessCode"
)

// BasicAuth creates a basic auth security scheme
func BasicAuth() *SecurityScheme {
	return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{Type: basic}}
}

// APIKeyAuth creates an api key auth security scheme
func APIKeyAuth(fieldName, valueSource string) *SecurityScheme {
	return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{Type: apiKey, Name: fieldName, In: valueSource}}
}

// OAuth2Implicit creates an implicit flow oauth2 security scheme
func OAuth2Implicit(authorizationURL string) *SecurityScheme {
	return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{
		Type:             oauth2,
		Flow:             implicit,
		AuthorizationURL: authorizationURL,
	}}
}

// OAuth2Password creates a password flow oauth2 security scheme
func OAuth2Password(tokenURL string) *SecurityScheme {
	return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{
		Type:     oauth2,
		Flow:     password,
		TokenURL: tokenURL,
	}}
}

// OAuth2Application creates an application flow oauth2 security scheme
func OAuth2Application(tokenURL string) *SecurityScheme {
	return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{
		Type:     oauth2,
		Flow:     application,
		TokenURL: tokenURL,
	}}
}

// OAuth2AccessToken creates an access token flow oauth2 security scheme
func OAuth2AccessToken(authorizationURL, tokenURL string) *SecurityScheme {
	return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{
		Type:             oauth2,
		Flow:             accessCode,
		AuthorizationURL: authorizationURL,
		TokenURL:         tokenURL,
	}}
}

type SecuritySchemeProps struct {
	Description      string            `json:"description,omitempty"`
	Type             string            `json:"type"`
	Name             string            `json:"name,omitempty"`             // api key
	In               string            `json:"in,omitempty"`               // api key
	Flow             string            `json:"flow,omitempty"`             // oauth2
	AuthorizationURL string            `json:"authorizationUrl,omitempty"` // oauth2
	TokenURL         string            `json:"tokenUrl,omitempty"`         // oauth2
	Scopes           map[string]string `json:"scopes,omitempty"`           // oauth2
}

// AddScope adds a scope to this security scheme
func (s *SecuritySchemeProps) AddScope(scope, description string) {
	if s.Scopes == nil {
		s.Scopes = make(map[string]string)
	}
	s.Scopes[scope] = description
}

// SecurityScheme allows the definition of a security scheme that can be used by the operations.
// Supported schemes are basic authentication, an API key (either as a header or as a query parameter)
// and OAuth2's common flows (implicit, password, application and access code).
//
// For more information: http://goo.gl/8us55a#securitySchemeObject
type SecurityScheme struct {
	VendorExtensible
	SecuritySchemeProps
}

// JSONLookup implements an interface to customize json pointer lookup
func (s SecurityScheme) JSONLookup(token string) (interface{}, error) {
	if ex, ok := s.Extensions[token]; ok {
		return &ex, nil
	}

	r, _, err := jsonpointer.GetForToken(s.SecuritySchemeProps, token)
	return r, err
}

// MarshalJSON marshal this to JSON
func (s SecurityScheme) MarshalJSON() ([]byte, error) {
	b1, err := json.Marshal(s.SecuritySchemeProps)
	if err != nil {
		return nil, err
	}
	b2, err := json.Marshal(s.VendorExtensible)
	if err != nil {
		return nil, err
	}
	return swag.ConcatJSON(b1, b2), nil
}

// UnmarshalJSON marshal this from JSON
func (s *SecurityScheme) UnmarshalJSON(data []byte) error {
	if err := json.Unmarshal(data, &s.SecuritySchemeProps); err != nil {
		return err
	}
	if err := json.Unmarshal(data, &s.VendorExtensible); err != nil {
		return err
	}
	return nil
}
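A short sketch of the constructors above (same vendored import path; the auth endpoints are hypothetical placeholders):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	scheme := spec.OAuth2AccessToken("https://auth.example.org/authorize", "https://auth.example.org/token")
	scheme.AddScope("read:pets", "read access to pets") // promoted from the embedded SecuritySchemeProps
	b, _ := json.Marshal(scheme)
	fmt.Println(string(b)) // type, flow, both URLs and the scopes map
}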
79 vendor/github.com/go-openapi/spec/spec.go generated vendored
@ -1,79 +0,0 @@
// Copyright 2015 go-swagger maintainers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package spec

import "encoding/json"

//go:generate go-bindata -pkg=spec -prefix=./schemas -ignore=.*\.md ./schemas/...
//go:generate perl -pi -e s,Json,JSON,g bindata.go

const (
	// SwaggerSchemaURL the url for the swagger 2.0 schema to validate specs
	SwaggerSchemaURL = "http://swagger.io/v2/schema.json#"
	// JSONSchemaURL the url for the json schema schema
	JSONSchemaURL = "http://json-schema.org/draft-04/schema#"
)

var (
	jsonSchema    = MustLoadJSONSchemaDraft04()
	swaggerSchema = MustLoadSwagger20Schema()
)

// MustLoadJSONSchemaDraft04 panics when Swagger20Schema returns an error
func MustLoadJSONSchemaDraft04() *Schema {
	d, e := JSONSchemaDraft04()
	if e != nil {
		panic(e)
	}
	return d
}

// JSONSchemaDraft04 loads the json schema document for json shema draft04
func JSONSchemaDraft04() (*Schema, error) {
	b, err := Asset("jsonschema-draft-04.json")
	if err != nil {
		return nil, err
	}

	schema := new(Schema)
	if err := json.Unmarshal(b, schema); err != nil {
		return nil, err
	}
	return schema, nil
}

// MustLoadSwagger20Schema panics when Swagger20Schema returns an error
func MustLoadSwagger20Schema() *Schema {
	d, e := Swagger20Schema()
	if e != nil {
		panic(e)
	}
	return d
}

// Swagger20Schema loads the swagger 2.0 schema from the embedded assets
func Swagger20Schema() (*Schema, error) {

	b, err := Asset("v2/schema.json")
	if err != nil {
		return nil, err
	}

	schema := new(Schema)
	if err := json.Unmarshal(b, schema); err != nil {
		return nil, err
	}
	return schema, nil
}
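A sketch of loading the embedded schema; this assumes a build that still ships the go-bindata-generated bindata.go with the "v2/schema.json" asset, since Swagger20Schema reads it via Asset.

package main

import (
	"fmt"
	"log"

	"github.com/go-openapi/spec"
)

func main() {
	schema, err := spec.Swagger20Schema() // fails if the bindata asset is missing
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(schema.ID)
}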
317 vendor/github.com/go-openapi/spec/swagger.go generated vendored
@ -1,317 +0,0 @@
// Copyright 2015 go-swagger maintainers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package spec

import (
	"encoding/json"
	"fmt"
	"strconv"

	"github.com/go-openapi/jsonpointer"
	"github.com/go-openapi/swag"
)

// Swagger this is the root document object for the API specification.
// It combines what previously was the Resource Listing and API Declaration (version 1.2 and earlier) together into one document.
//
// For more information: http://goo.gl/8us55a#swagger-object-
type Swagger struct {
	VendorExtensible
	SwaggerProps
}

// JSONLookup look up a value by the json property name
func (s Swagger) JSONLookup(token string) (interface{}, error) {
	if ex, ok := s.Extensions[token]; ok {
		return &ex, nil
	}
	r, _, err := jsonpointer.GetForToken(s.SwaggerProps, token)
	return r, err
}

// MarshalJSON marshals this swagger structure to json
func (s Swagger) MarshalJSON() ([]byte, error) {
	b1, err := json.Marshal(s.SwaggerProps)
	if err != nil {
		return nil, err
	}
	b2, err := json.Marshal(s.VendorExtensible)
	if err != nil {
		return nil, err
	}
	return swag.ConcatJSON(b1, b2), nil
}

// UnmarshalJSON unmarshals a swagger spec from json
func (s *Swagger) UnmarshalJSON(data []byte) error {
	var sw Swagger
	if err := json.Unmarshal(data, &sw.SwaggerProps); err != nil {
		return err
	}
	if err := json.Unmarshal(data, &sw.VendorExtensible); err != nil {
		return err
	}
	*s = sw
	return nil
}

type SwaggerProps struct {
	ID                  string                 `json:"id,omitempty"`
	Consumes            []string               `json:"consumes,omitempty"`
	Produces            []string               `json:"produces,omitempty"`
	Schemes             []string               `json:"schemes,omitempty"` // the scheme, when present must be from [http, https, ws, wss]
	Swagger             string                 `json:"swagger,omitempty"`
	Info                *Info                  `json:"info,omitempty"`
	Host                string                 `json:"host,omitempty"`
	BasePath            string                 `json:"basePath,omitempty"` // must start with a leading "/"
	Paths               *Paths                 `json:"paths"`              // required
	Definitions         Definitions            `json:"definitions"`
	Parameters          map[string]Parameter   `json:"parameters,omitempty"`
	Responses           map[string]Response    `json:"responses,omitempty"`
	SecurityDefinitions SecurityDefinitions    `json:"securityDefinitions,omitempty"`
	Security            []map[string][]string  `json:"security,omitempty"`
	Tags                []Tag                  `json:"tags,omitempty"`
	ExternalDocs        *ExternalDocumentation `json:"externalDocs,omitempty"`
}

// Dependencies represent a dependencies property
type Dependencies map[string]SchemaOrStringArray

// SchemaOrBool represents a schema or boolean value, is biased towards true for the boolean property
type SchemaOrBool struct {
	Allows bool
	Schema *Schema
}

// JSONLookup implements an interface to customize json pointer lookup
func (s SchemaOrBool) JSONLookup(token string) (interface{}, error) {
	if token == "allows" {
		return s.Allows, nil
	}
	r, _, err := jsonpointer.GetForToken(s.Schema, token)
	return r, err
}

var jsTrue = []byte("true")
var jsFalse = []byte("false")

// MarshalJSON convert this object to JSON
func (s SchemaOrBool) MarshalJSON() ([]byte, error) {
	if s.Schema != nil {
		return json.Marshal(s.Schema)
	}

	if s.Schema == nil && !s.Allows {
		return jsFalse, nil
	}
	return jsTrue, nil
}

// UnmarshalJSON converts this bool or schema object from a JSON structure
func (s *SchemaOrBool) UnmarshalJSON(data []byte) error {
	var nw SchemaOrBool
	if len(data) >= 4 {
		if data[0] == '{' {
			var sch Schema
			if err := json.Unmarshal(data, &sch); err != nil {
				return err
			}
			nw.Schema = &sch
		}
		nw.Allows = !(data[0] == 'f' && data[1] == 'a' && data[2] == 'l' && data[3] == 's' && data[4] == 'e')
	}
	*s = nw
	return nil
}

// SchemaOrStringArray represents a schema or a string array
type SchemaOrStringArray struct {
	Schema   *Schema
	Property []string
}

// JSONLookup implements an interface to customize json pointer lookup
func (s SchemaOrStringArray) JSONLookup(token string) (interface{}, error) {
	r, _, err := jsonpointer.GetForToken(s.Schema, token)
	return r, err
}

// MarshalJSON converts this schema object or array into JSON structure
func (s SchemaOrStringArray) MarshalJSON() ([]byte, error) {
	if len(s.Property) > 0 {
		return json.Marshal(s.Property)
	}
	if s.Schema != nil {
		return json.Marshal(s.Schema)
	}
	return nil, nil
}

// UnmarshalJSON converts this schema object or array from a JSON structure
func (s *SchemaOrStringArray) UnmarshalJSON(data []byte) error {
	var first byte
	if len(data) > 1 {
		first = data[0]
	}
	var nw SchemaOrStringArray
	if first == '{' {
		var sch Schema
		if err := json.Unmarshal(data, &sch); err != nil {
			return err
		}
		nw.Schema = &sch
	}
	if first == '[' {
		if err := json.Unmarshal(data, &nw.Property); err != nil {
			return err
		}
	}
	*s = nw
	return nil
}

// Definitions contains the models explicitly defined in this spec
// An object to hold data types that can be consumed and produced by operations.
// These data types can be primitives, arrays or models.
//
// For more information: http://goo.gl/8us55a#definitionsObject
type Definitions map[string]Schema

// SecurityDefinitions a declaration of the security schemes available to be used in the specification.
// This does not enforce the security schemes on the operations and only serves to provide
// the relevant details for each scheme.
//
// For more information: http://goo.gl/8us55a#securityDefinitionsObject
type SecurityDefinitions map[string]*SecurityScheme

// StringOrArray represents a value that can either be a string
// or an array of strings. Mainly here for serialization purposes
type StringOrArray []string

// Contains returns true when the value is contained in the slice
func (s StringOrArray) Contains(value string) bool {
	for _, str := range s {
		if str == value {
			return true
		}
	}
	return false
}

// JSONLookup implements an interface to customize json pointer lookup
func (s SchemaOrArray) JSONLookup(token string) (interface{}, error) {
	if _, err := strconv.Atoi(token); err == nil {
		r, _, err := jsonpointer.GetForToken(s.Schemas, token)
		return r, err
	}
	r, _, err := jsonpointer.GetForToken(s.Schema, token)
	return r, err
}

// UnmarshalJSON unmarshals this string or array object from a JSON array or JSON string
func (s *StringOrArray) UnmarshalJSON(data []byte) error {
	var first byte
	if len(data) > 1 {
		first = data[0]
	}

	if first == '[' {
		var parsed []string
		if err := json.Unmarshal(data, &parsed); err != nil {
			return err
		}
		*s = StringOrArray(parsed)
		return nil
	}

	var single interface{}
	if err := json.Unmarshal(data, &single); err != nil {
		return err
	}
	if single == nil {
		return nil
	}
	switch single.(type) {
	case string:
		*s = StringOrArray([]string{single.(string)})
		return nil
	default:
		return fmt.Errorf("only string or array is allowed, not %T", single)
	}
}

// MarshalJSON converts this string or array to a JSON array or JSON string
func (s StringOrArray) MarshalJSON() ([]byte, error) {
	if len(s) == 1 {
		return json.Marshal([]string(s)[0])
	}
	return json.Marshal([]string(s))
}

// SchemaOrArray represents a value that can either be a Schema
// or an array of Schema. Mainly here for serialization purposes
type SchemaOrArray struct {
	Schema  *Schema
	Schemas []Schema
}

// Len returns the number of schemas in this property
func (s SchemaOrArray) Len() int {
	if s.Schema != nil {
		return 1
	}
	return len(s.Schemas)
}

// ContainsType returns true when one of the schemas is of the specified type
func (s *SchemaOrArray) ContainsType(name string) bool {
	if s.Schema != nil {
		return s.Schema.Type != nil && s.Schema.Type.Contains(name)
	}
	return false
}

// MarshalJSON converts this schema object or array into JSON structure
func (s SchemaOrArray) MarshalJSON() ([]byte, error) {
	if len(s.Schemas) > 0 {
		return json.Marshal(s.Schemas)
	}
	return json.Marshal(s.Schema)
}

// UnmarshalJSON converts this schema object or array from a JSON structure
func (s *SchemaOrArray) UnmarshalJSON(data []byte) error {
	var nw SchemaOrArray
	var first byte
	if len(data) > 1 {
		first = data[0]
	}
	if first == '{' {
		var sch Schema
		if err := json.Unmarshal(data, &sch); err != nil {
			return err
		}
		nw.Schema = &sch
	}
	if first == '[' {
		if err := json.Unmarshal(data, &nw.Schemas); err != nil {
			return err
		}
	}
	*s = nw
	return nil
}

// vim:set ft=go noet sts=2 sw=2 ts=2:
|
|
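The marshaling pair above is deliberately asymmetric: a one-element StringOrArray serializes as a bare JSON string and anything longer as an array, which is what lets a Swagger `type` field hold either form. A minimal sketch, assuming the upstream github.com/go-openapi/spec package (which this commit stops vendoring) is fetched separately:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	// MarshalJSON on StringOrArray picks the compact form for single values.
	one, _ := json.Marshal(spec.StringOrArray{"string"})
	many, _ := json.Marshal(spec.StringOrArray{"string", "null"})
	fmt.Println(string(one))  // "string"
	fmt.Println(string(many)) // ["string","null"]
}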
73
vendor/github.com/go-openapi/spec/tag.go
generated
vendored
@ -1,73 +0,0 @@
// Copyright 2015 go-swagger maintainers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package spec

import (
	"encoding/json"

	"github.com/go-openapi/jsonpointer"
	"github.com/go-openapi/swag"
)

type TagProps struct {
	Description  string                 `json:"description,omitempty"`
	Name         string                 `json:"name,omitempty"`
	ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"`
}

// NewTag creates a new tag
func NewTag(name, description string, externalDocs *ExternalDocumentation) Tag {
	return Tag{TagProps: TagProps{description, name, externalDocs}}
}

// Tag allows adding metadata to a single tag that is used by the [Operation Object](http://goo.gl/8us55a#operationObject).
// It is not mandatory to have a Tag Object per tag used there.
//
// For more information: http://goo.gl/8us55a#tagObject
type Tag struct {
	VendorExtensible
	TagProps
}

// JSONLookup implements an interface to customize json pointer lookup
func (t Tag) JSONLookup(token string) (interface{}, error) {
	if ex, ok := t.Extensions[token]; ok {
		return &ex, nil
	}

	r, _, err := jsonpointer.GetForToken(t.TagProps, token)
	return r, err
}

// MarshalJSON marshals this to JSON
func (t Tag) MarshalJSON() ([]byte, error) {
	b1, err := json.Marshal(t.TagProps)
	if err != nil {
		return nil, err
	}
	b2, err := json.Marshal(t.VendorExtensible)
	if err != nil {
		return nil, err
	}
	return swag.ConcatJSON(b1, b2), nil
}

// UnmarshalJSON unmarshals this from JSON
func (t *Tag) UnmarshalJSON(data []byte) error {
	if err := json.Unmarshal(data, &t.TagProps); err != nil {
		return err
	}
	return json.Unmarshal(data, &t.VendorExtensible)
}
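Tag.MarshalJSON above merges two independently marshaled halves (TagProps and the embedded VendorExtensible) with swag.ConcatJSON rather than defining one flat struct. A short usage sketch, under the same assumption that the upstream package is importable:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	t := spec.NewTag("store", "Access to the store", nil)
	b, err := json.Marshal(t)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"description":"Access to the store","name":"store"}
}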
68
vendor/github.com/go-openapi/spec/xml_object.go
generated
vendored
@ -1,68 +0,0 @@
// Copyright 2015 go-swagger maintainers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package spec

// XMLObject a metadata object that allows for more fine-tuned XML model definitions.
//
// For more information: http://goo.gl/8us55a#xmlObject
type XMLObject struct {
	Name      string `json:"name,omitempty"`
	Namespace string `json:"namespace,omitempty"`
	Prefix    string `json:"prefix,omitempty"`
	Attribute bool   `json:"attribute,omitempty"`
	Wrapped   bool   `json:"wrapped,omitempty"`
}

// WithName sets the xml name for the object
func (x *XMLObject) WithName(name string) *XMLObject {
	x.Name = name
	return x
}

// WithNamespace sets the xml namespace for the object
func (x *XMLObject) WithNamespace(namespace string) *XMLObject {
	x.Namespace = namespace
	return x
}

// WithPrefix sets the xml prefix for the object
func (x *XMLObject) WithPrefix(prefix string) *XMLObject {
	x.Prefix = prefix
	return x
}

// AsAttribute flags this object as an xml attribute
func (x *XMLObject) AsAttribute() *XMLObject {
	x.Attribute = true
	return x
}

// AsElement flags this object as an xml node
func (x *XMLObject) AsElement() *XMLObject {
	x.Attribute = false
	return x
}

// AsWrapped flags this object as wrapped, this is mostly useful for array types
func (x *XMLObject) AsWrapped() *XMLObject {
	x.Wrapped = true
	return x
}

// AsUnwrapped flags this object as an unwrapped xml node
func (x *XMLObject) AsUnwrapped() *XMLObject {
	x.Wrapped = false
	return x
}
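Every setter on XMLObject returns the receiver, so the struct is meant to be configured as a fluent chain. A minimal sketch, under the same upstream-import assumption:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	// Chain the builders; only the fields that were set survive omitempty.
	x := new(spec.XMLObject).WithName("animal").WithPrefix("sample").AsAttribute()
	b, _ := json.Marshal(x)
	fmt.Println(string(b)) // {"name":"animal","prefix":"sample","attribute":true}
}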
202
vendor/github.com/go-openapi/swag/LICENSE
generated
vendored
@ -1,202 +0,0 @@

                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "[]"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright [yyyy] [name of copyright owner]

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
12
vendor/github.com/go-openapi/swag/README.md
generated
vendored
@ -1,12 +0,0 @@
# Swag [](https://ci.vmware.run/go-openapi/swag) [](https://coverage.vmware.run/go-openapi/swag) [](https://slackin.goswagger.io)

[](https://raw.githubusercontent.com/go-openapi/swag/master/LICENSE) [](http://godoc.org/github.com/go-openapi/swag)

Contains a bunch of helper functions (see the sketch after this list):

* convert between value and pointers for builtins
* convert from string to builtin
* fast json concatenation
* search in path
* load from file or http
* name mangling
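The value/pointer converters that the README lists first are the most commonly used pieces of the package. A minimal usage sketch, assuming github.com/go-openapi/swag is pulled from upstream rather than from vendor/:

package main

import (
	"fmt"

	"github.com/go-openapi/swag"
)

func main() {
	name := swag.String("containerd")            // *string from a literal
	fmt.Println(swag.StringValue(name))          // containerd
	fmt.Println(swag.StringValue(nil) == "")     // true: nil pointers yield the zero value
	fmt.Println(swag.BoolValue(swag.Bool(true))) // true
}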
188
vendor/github.com/go-openapi/swag/convert.go
generated
vendored
@ -1,188 +0,0 @@
// Copyright 2015 go-swagger maintainers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package swag

import (
	"math"
	"strconv"
	"strings"
)

// same as ECMA Number.MAX_SAFE_INTEGER and Number.MIN_SAFE_INTEGER
const (
	maxJSONFloat = float64(1<<53 - 1)  // 9007199254740991.0  2^53 - 1
	minJSONFloat = -float64(1<<53 - 1) // -9007199254740991.0 -(2^53 - 1)
)

// IsFloat64AJSONInteger allows for integers [-2^53, 2^53-1] inclusive
func IsFloat64AJSONInteger(f float64) bool {
	if math.IsNaN(f) || math.IsInf(f, 0) || f < minJSONFloat || f > maxJSONFloat {
		return false
	}

	return f == float64(int64(f)) || f == float64(uint64(f))
}

var evaluatesAsTrue = map[string]struct{}{
	"true":     struct{}{},
	"1":        struct{}{},
	"yes":      struct{}{},
	"ok":       struct{}{},
	"y":        struct{}{},
	"on":       struct{}{},
	"selected": struct{}{},
	"checked":  struct{}{},
	"t":        struct{}{},
	"enabled":  struct{}{},
}

// ConvertBool turns a string into a boolean
func ConvertBool(str string) (bool, error) {
	_, ok := evaluatesAsTrue[strings.ToLower(str)]
	return ok, nil
}

// ConvertFloat32 turns a string into a float32
func ConvertFloat32(str string) (float32, error) {
	f, err := strconv.ParseFloat(str, 32)
	if err != nil {
		return 0, err
	}
	return float32(f), nil
}

// ConvertFloat64 turns a string into a float64
func ConvertFloat64(str string) (float64, error) {
	return strconv.ParseFloat(str, 64)
}

// ConvertInt8 turns a string into an int8
func ConvertInt8(str string) (int8, error) {
	i, err := strconv.ParseInt(str, 10, 8)
	if err != nil {
		return 0, err
	}
	return int8(i), nil
}

// ConvertInt16 turns a string into an int16
func ConvertInt16(str string) (int16, error) {
	i, err := strconv.ParseInt(str, 10, 16)
	if err != nil {
		return 0, err
	}
	return int16(i), nil
}

// ConvertInt32 turns a string into an int32
func ConvertInt32(str string) (int32, error) {
	i, err := strconv.ParseInt(str, 10, 32)
	if err != nil {
		return 0, err
	}
	return int32(i), nil
}

// ConvertInt64 turns a string into an int64
func ConvertInt64(str string) (int64, error) {
	return strconv.ParseInt(str, 10, 64)
}

// ConvertUint8 turns a string into a uint8
func ConvertUint8(str string) (uint8, error) {
	i, err := strconv.ParseUint(str, 10, 8)
	if err != nil {
		return 0, err
	}
	return uint8(i), nil
}

// ConvertUint16 turns a string into a uint16
func ConvertUint16(str string) (uint16, error) {
	i, err := strconv.ParseUint(str, 10, 16)
	if err != nil {
		return 0, err
	}
	return uint16(i), nil
}

// ConvertUint32 turns a string into a uint32
func ConvertUint32(str string) (uint32, error) {
	i, err := strconv.ParseUint(str, 10, 32)
	if err != nil {
		return 0, err
	}
	return uint32(i), nil
}

// ConvertUint64 turns a string into a uint64
func ConvertUint64(str string) (uint64, error) {
	return strconv.ParseUint(str, 10, 64)
}

// FormatBool turns a boolean into a string
func FormatBool(value bool) string {
	return strconv.FormatBool(value)
}

// FormatFloat32 turns a float32 into a string
func FormatFloat32(value float32) string {
	return strconv.FormatFloat(float64(value), 'f', -1, 32)
}

// FormatFloat64 turns a float64 into a string
func FormatFloat64(value float64) string {
	return strconv.FormatFloat(value, 'f', -1, 64)
}

// FormatInt8 turns an int8 into a string
func FormatInt8(value int8) string {
	return strconv.FormatInt(int64(value), 10)
}

// FormatInt16 turns an int16 into a string
func FormatInt16(value int16) string {
	return strconv.FormatInt(int64(value), 10)
}

// FormatInt32 turns an int32 into a string
func FormatInt32(value int32) string {
	return strconv.FormatInt(int64(value), 10)
}

// FormatInt64 turns an int64 into a string
func FormatInt64(value int64) string {
	return strconv.FormatInt(value, 10)
}

// FormatUint8 turns a uint8 into a string
func FormatUint8(value uint8) string {
	return strconv.FormatUint(uint64(value), 10)
}

// FormatUint16 turns a uint16 into a string
func FormatUint16(value uint16) string {
	return strconv.FormatUint(uint64(value), 10)
}

// FormatUint32 turns a uint32 into a string
func FormatUint32(value uint32) string {
	return strconv.FormatUint(uint64(value), 10)
}

// FormatUint64 turns a uint64 into a string
func FormatUint64(value uint64) string {
	return strconv.FormatUint(value, 10)
}
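Two behaviors in convert.go are easy to miss: ConvertBool never returns an error (any string outside the truthy set is simply false), and IsFloat64AJSONInteger bounds integers to the ECMA safe range. A small sketch, same upstream-import assumption:

package main

import (
	"fmt"

	"github.com/go-openapi/swag"
)

func main() {
	yes, _ := swag.ConvertBool("Yes")  // case-insensitive member of the truthy set
	no, _ := swag.ConvertBool("nope")  // unknown strings are false, not an error
	fmt.Println(yes, no)               // true false

	fmt.Println(swag.IsFloat64AJSONInteger(9007199254740991)) // true: exactly 2^53-1
	fmt.Println(swag.IsFloat64AJSONInteger(1.5))              // false
}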
595
vendor/github.com/go-openapi/swag/convert_types.go
generated
vendored
@ -1,595 +0,0 @@
package swag

import "time"

// This file was taken from the aws go sdk

// String returns a pointer to the string value passed in.
func String(v string) *string {
	return &v
}

// StringValue returns the value of the string pointer passed in or
// "" if the pointer is nil.
func StringValue(v *string) string {
	if v != nil {
		return *v
	}
	return ""
}

// StringSlice converts a slice of string values into a slice of
// string pointers
func StringSlice(src []string) []*string {
	dst := make([]*string, len(src))
	for i := 0; i < len(src); i++ {
		dst[i] = &(src[i])
	}
	return dst
}

// StringValueSlice converts a slice of string pointers into a slice of
// string values
func StringValueSlice(src []*string) []string {
	dst := make([]string, len(src))
	for i := 0; i < len(src); i++ {
		if src[i] != nil {
			dst[i] = *(src[i])
		}
	}
	return dst
}

// StringMap converts a string map of string values into a string
// map of string pointers
func StringMap(src map[string]string) map[string]*string {
	dst := make(map[string]*string)
	for k, val := range src {
		v := val
		dst[k] = &v
	}
	return dst
}

// StringValueMap converts a string map of string pointers into a string
// map of string values
func StringValueMap(src map[string]*string) map[string]string {
	dst := make(map[string]string)
	for k, val := range src {
		if val != nil {
			dst[k] = *val
		}
	}
	return dst
}

// Bool returns a pointer to the bool value passed in.
func Bool(v bool) *bool {
	return &v
}

// BoolValue returns the value of the bool pointer passed in or
// false if the pointer is nil.
func BoolValue(v *bool) bool {
	if v != nil {
		return *v
	}
	return false
}

// BoolSlice converts a slice of bool values into a slice of
// bool pointers
func BoolSlice(src []bool) []*bool {
	dst := make([]*bool, len(src))
	for i := 0; i < len(src); i++ {
		dst[i] = &(src[i])
	}
	return dst
}

// BoolValueSlice converts a slice of bool pointers into a slice of
// bool values
func BoolValueSlice(src []*bool) []bool {
	dst := make([]bool, len(src))
	for i := 0; i < len(src); i++ {
		if src[i] != nil {
			dst[i] = *(src[i])
		}
	}
	return dst
}

// BoolMap converts a string map of bool values into a string
// map of bool pointers
func BoolMap(src map[string]bool) map[string]*bool {
	dst := make(map[string]*bool)
	for k, val := range src {
		v := val
		dst[k] = &v
	}
	return dst
}

// BoolValueMap converts a string map of bool pointers into a string
// map of bool values
func BoolValueMap(src map[string]*bool) map[string]bool {
	dst := make(map[string]bool)
	for k, val := range src {
		if val != nil {
			dst[k] = *val
		}
	}
	return dst
}

// Int returns a pointer to the int value passed in.
func Int(v int) *int {
	return &v
}

// IntValue returns the value of the int pointer passed in or
// 0 if the pointer is nil.
func IntValue(v *int) int {
	if v != nil {
		return *v
	}
	return 0
}

// IntSlice converts a slice of int values into a slice of
// int pointers
func IntSlice(src []int) []*int {
	dst := make([]*int, len(src))
	for i := 0; i < len(src); i++ {
		dst[i] = &(src[i])
	}
	return dst
}

// IntValueSlice converts a slice of int pointers into a slice of
// int values
func IntValueSlice(src []*int) []int {
	dst := make([]int, len(src))
	for i := 0; i < len(src); i++ {
		if src[i] != nil {
			dst[i] = *(src[i])
		}
	}
	return dst
}

// IntMap converts a string map of int values into a string
// map of int pointers
func IntMap(src map[string]int) map[string]*int {
	dst := make(map[string]*int)
	for k, val := range src {
		v := val
		dst[k] = &v
	}
	return dst
}

// IntValueMap converts a string map of int pointers into a string
// map of int values
func IntValueMap(src map[string]*int) map[string]int {
	dst := make(map[string]int)
	for k, val := range src {
		if val != nil {
			dst[k] = *val
		}
	}
	return dst
}

// Int32 returns a pointer to the int32 value passed in.
func Int32(v int32) *int32 {
	return &v
}

// Int32Value returns the value of the int32 pointer passed in or
// 0 if the pointer is nil.
func Int32Value(v *int32) int32 {
	if v != nil {
		return *v
	}
	return 0
}

// Int32Slice converts a slice of int32 values into a slice of
// int32 pointers
func Int32Slice(src []int32) []*int32 {
	dst := make([]*int32, len(src))
	for i := 0; i < len(src); i++ {
		dst[i] = &(src[i])
	}
	return dst
}

// Int32ValueSlice converts a slice of int32 pointers into a slice of
// int32 values
func Int32ValueSlice(src []*int32) []int32 {
	dst := make([]int32, len(src))
	for i := 0; i < len(src); i++ {
		if src[i] != nil {
			dst[i] = *(src[i])
		}
	}
	return dst
}

// Int32Map converts a string map of int32 values into a string
// map of int32 pointers
func Int32Map(src map[string]int32) map[string]*int32 {
	dst := make(map[string]*int32)
	for k, val := range src {
		v := val
		dst[k] = &v
	}
	return dst
}

// Int32ValueMap converts a string map of int32 pointers into a string
// map of int32 values
func Int32ValueMap(src map[string]*int32) map[string]int32 {
	dst := make(map[string]int32)
	for k, val := range src {
		if val != nil {
			dst[k] = *val
		}
	}
	return dst
}

// Int64 returns a pointer to the int64 value passed in.
func Int64(v int64) *int64 {
	return &v
}

// Int64Value returns the value of the int64 pointer passed in or
// 0 if the pointer is nil.
func Int64Value(v *int64) int64 {
	if v != nil {
		return *v
	}
	return 0
}

// Int64Slice converts a slice of int64 values into a slice of
// int64 pointers
func Int64Slice(src []int64) []*int64 {
	dst := make([]*int64, len(src))
	for i := 0; i < len(src); i++ {
		dst[i] = &(src[i])
	}
	return dst
}

// Int64ValueSlice converts a slice of int64 pointers into a slice of
// int64 values
func Int64ValueSlice(src []*int64) []int64 {
	dst := make([]int64, len(src))
	for i := 0; i < len(src); i++ {
		if src[i] != nil {
			dst[i] = *(src[i])
		}
	}
	return dst
}

// Int64Map converts a string map of int64 values into a string
// map of int64 pointers
func Int64Map(src map[string]int64) map[string]*int64 {
	dst := make(map[string]*int64)
	for k, val := range src {
		v := val
		dst[k] = &v
	}
	return dst
}

// Int64ValueMap converts a string map of int64 pointers into a string
// map of int64 values
func Int64ValueMap(src map[string]*int64) map[string]int64 {
	dst := make(map[string]int64)
	for k, val := range src {
		if val != nil {
			dst[k] = *val
		}
	}
	return dst
}

// Uint returns a pointer to the uint value passed in.
func Uint(v uint) *uint {
	return &v
}

// UintValue returns the value of the uint pointer passed in or
// 0 if the pointer is nil.
func UintValue(v *uint) uint {
	if v != nil {
		return *v
	}
	return 0
}

// UintSlice converts a slice of uint values into a slice of
// uint pointers
func UintSlice(src []uint) []*uint {
	dst := make([]*uint, len(src))
	for i := 0; i < len(src); i++ {
		dst[i] = &(src[i])
	}
	return dst
}

// UintValueSlice converts a slice of uint pointers into a slice of
// uint values
func UintValueSlice(src []*uint) []uint {
	dst := make([]uint, len(src))
	for i := 0; i < len(src); i++ {
		if src[i] != nil {
			dst[i] = *(src[i])
		}
	}
	return dst
}

// UintMap converts a string map of uint values into a string
// map of uint pointers
func UintMap(src map[string]uint) map[string]*uint {
	dst := make(map[string]*uint)
	for k, val := range src {
		v := val
		dst[k] = &v
	}
	return dst
}

// UintValueMap converts a string map of uint pointers into a string
// map of uint values
func UintValueMap(src map[string]*uint) map[string]uint {
	dst := make(map[string]uint)
	for k, val := range src {
		if val != nil {
			dst[k] = *val
		}
	}
	return dst
}

// Uint32 returns a pointer to the uint32 value passed in.
func Uint32(v uint32) *uint32 {
	return &v
}

// Uint32Value returns the value of the uint32 pointer passed in or
// 0 if the pointer is nil.
func Uint32Value(v *uint32) uint32 {
	if v != nil {
		return *v
	}
	return 0
}

// Uint32Slice converts a slice of uint32 values into a slice of
// uint32 pointers
func Uint32Slice(src []uint32) []*uint32 {
	dst := make([]*uint32, len(src))
	for i := 0; i < len(src); i++ {
		dst[i] = &(src[i])
	}
	return dst
}

// Uint32ValueSlice converts a slice of uint32 pointers into a slice of
// uint32 values
func Uint32ValueSlice(src []*uint32) []uint32 {
	dst := make([]uint32, len(src))
	for i := 0; i < len(src); i++ {
		if src[i] != nil {
			dst[i] = *(src[i])
		}
	}
	return dst
}

// Uint32Map converts a string map of uint32 values into a string
// map of uint32 pointers
func Uint32Map(src map[string]uint32) map[string]*uint32 {
	dst := make(map[string]*uint32)
	for k, val := range src {
		v := val
		dst[k] = &v
	}
	return dst
}

// Uint32ValueMap converts a string map of uint32 pointers into a string
// map of uint32 values
func Uint32ValueMap(src map[string]*uint32) map[string]uint32 {
	dst := make(map[string]uint32)
	for k, val := range src {
		if val != nil {
			dst[k] = *val
		}
	}
	return dst
}

// Uint64 returns a pointer to the uint64 value passed in.
func Uint64(v uint64) *uint64 {
	return &v
}

// Uint64Value returns the value of the uint64 pointer passed in or
// 0 if the pointer is nil.
func Uint64Value(v *uint64) uint64 {
	if v != nil {
		return *v
	}
	return 0
}

// Uint64Slice converts a slice of uint64 values into a slice of
// uint64 pointers
func Uint64Slice(src []uint64) []*uint64 {
	dst := make([]*uint64, len(src))
	for i := 0; i < len(src); i++ {
		dst[i] = &(src[i])
	}
	return dst
}

// Uint64ValueSlice converts a slice of uint64 pointers into a slice of
// uint64 values
func Uint64ValueSlice(src []*uint64) []uint64 {
	dst := make([]uint64, len(src))
	for i := 0; i < len(src); i++ {
		if src[i] != nil {
			dst[i] = *(src[i])
		}
	}
	return dst
}

// Uint64Map converts a string map of uint64 values into a string
// map of uint64 pointers
func Uint64Map(src map[string]uint64) map[string]*uint64 {
	dst := make(map[string]*uint64)
	for k, val := range src {
		v := val
		dst[k] = &v
	}
	return dst
}

// Uint64ValueMap converts a string map of uint64 pointers into a string
// map of uint64 values
func Uint64ValueMap(src map[string]*uint64) map[string]uint64 {
	dst := make(map[string]uint64)
	for k, val := range src {
		if val != nil {
			dst[k] = *val
		}
	}
	return dst
}

// Float64 returns a pointer to the float64 value passed in.
func Float64(v float64) *float64 {
	return &v
}

// Float64Value returns the value of the float64 pointer passed in or
// 0 if the pointer is nil.
func Float64Value(v *float64) float64 {
	if v != nil {
		return *v
	}
	return 0
}

// Float64Slice converts a slice of float64 values into a slice of
// float64 pointers
func Float64Slice(src []float64) []*float64 {
	dst := make([]*float64, len(src))
	for i := 0; i < len(src); i++ {
		dst[i] = &(src[i])
	}
	return dst
}

// Float64ValueSlice converts a slice of float64 pointers into a slice of
// float64 values
func Float64ValueSlice(src []*float64) []float64 {
	dst := make([]float64, len(src))
	for i := 0; i < len(src); i++ {
		if src[i] != nil {
			dst[i] = *(src[i])
		}
	}
	return dst
}

// Float64Map converts a string map of float64 values into a string
// map of float64 pointers
func Float64Map(src map[string]float64) map[string]*float64 {
	dst := make(map[string]*float64)
	for k, val := range src {
		v := val
		dst[k] = &v
	}
	return dst
}

// Float64ValueMap converts a string map of float64 pointers into a string
// map of float64 values
func Float64ValueMap(src map[string]*float64) map[string]float64 {
	dst := make(map[string]float64)
	for k, val := range src {
		if val != nil {
			dst[k] = *val
		}
	}
	return dst
}

// Time returns a pointer to the time.Time value passed in.
func Time(v time.Time) *time.Time {
	return &v
}

// TimeValue returns the value of the time.Time pointer passed in or
// time.Time{} if the pointer is nil.
func TimeValue(v *time.Time) time.Time {
	if v != nil {
		return *v
	}
	return time.Time{}
}

// TimeSlice converts a slice of time.Time values into a slice of
// time.Time pointers
func TimeSlice(src []time.Time) []*time.Time {
	dst := make([]*time.Time, len(src))
	for i := 0; i < len(src); i++ {
		dst[i] = &(src[i])
	}
	return dst
}

// TimeValueSlice converts a slice of time.Time pointers into a slice of
// time.Time values
func TimeValueSlice(src []*time.Time) []time.Time {
	dst := make([]time.Time, len(src))
	for i := 0; i < len(src); i++ {
		if src[i] != nil {
			dst[i] = *(src[i])
		}
	}
	return dst
}

// TimeMap converts a string map of time.Time values into a string
// map of time.Time pointers
func TimeMap(src map[string]time.Time) map[string]*time.Time {
	dst := make(map[string]*time.Time)
	for k, val := range src {
		v := val
		dst[k] = &v
	}
	return dst
}

// TimeValueMap converts a string map of time.Time pointers into a string
// map of time.Time values
func TimeValueMap(src map[string]*time.Time) map[string]time.Time {
	dst := make(map[string]time.Time)
	for k, val := range src {
		if val != nil {
			dst[k] = *val
		}
	}
	return dst
}
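All of the Slice/Map helpers above follow one template: value-to-pointer allocates one pointer per element, and pointer-to-value substitutes the zero value for nil entries rather than failing. For example, same assumption as before:

package main

import (
	"fmt"

	"github.com/go-openapi/swag"
)

func main() {
	ptrs := swag.Int64Slice([]int64{1, 2, 3}) // []*int64, one pointer per element
	ptrs = append(ptrs, nil)
	fmt.Println(swag.Int64ValueSlice(ptrs)) // [1 2 3 0]: nil entries become 0
}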
270
vendor/github.com/go-openapi/swag/json.go
generated
vendored
@ -1,270 +0,0 @@
|
|||||||
// Copyright 2015 go-swagger maintainers
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package swag
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"encoding/json"
|
|
||||||
"reflect"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
|
|
||||||
"github.com/mailru/easyjson/jlexer"
|
|
||||||
"github.com/mailru/easyjson/jwriter"
|
|
||||||
)
|
|
||||||
|
|
||||||
// DefaultJSONNameProvider the default cache for types
|
|
||||||
var DefaultJSONNameProvider = NewNameProvider()
|
|
||||||
|
|
||||||
const comma = byte(',')
|
|
||||||
|
|
||||||
var closers = map[byte]byte{
|
|
||||||
'{': '}',
|
|
||||||
'[': ']',
|
|
||||||
}
|
|
||||||
|
|
||||||
type ejMarshaler interface {
|
|
||||||
MarshalEasyJSON(w *jwriter.Writer)
|
|
||||||
}
|
|
||||||
|
|
||||||
type ejUnmarshaler interface {
|
|
||||||
UnmarshalEasyJSON(w *jlexer.Lexer)
|
|
||||||
}
|
|
||||||
|
|
||||||
// WriteJSON writes json data, prefers finding an appropriate interface to short-circuit the marshaller
|
|
||||||
// so it takes the fastest option available.
|
|
||||||
func WriteJSON(data interface{}) ([]byte, error) {
|
|
||||||
if d, ok := data.(ejMarshaler); ok {
|
|
||||||
jw := new(jwriter.Writer)
|
|
||||||
d.MarshalEasyJSON(jw)
|
|
||||||
return jw.BuildBytes()
|
|
||||||
}
|
|
||||||
if d, ok := data.(json.Marshaler); ok {
|
|
||||||
return d.MarshalJSON()
|
|
||||||
}
|
|
||||||
return json.Marshal(data)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ReadJSON reads json data, prefers finding an appropriate interface to short-circuit the unmarshaller
|
|
||||||
// so it takes the fastes option available
|
|
||||||
func ReadJSON(data []byte, value interface{}) error {
|
|
||||||
if d, ok := value.(ejUnmarshaler); ok {
|
|
||||||
jl := &jlexer.Lexer{Data: data}
|
|
||||||
d.UnmarshalEasyJSON(jl)
|
|
||||||
return jl.Error()
|
|
||||||
}
|
|
||||||
if d, ok := value.(json.Unmarshaler); ok {
|
|
||||||
return d.UnmarshalJSON(data)
|
|
||||||
}
|
|
||||||
return json.Unmarshal(data, value)
|
|
||||||
}
|
|
||||||
|
|
||||||
// DynamicJSONToStruct converts an untyped json structure into a struct
|
|
||||||
func DynamicJSONToStruct(data interface{}, target interface{}) error {
|
|
||||||
// TODO: convert straight to a json typed map (mergo + iterate?)
|
|
||||||
b, err := WriteJSON(data)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err := ReadJSON(b, target); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ConcatJSON concatenates multiple json objects efficiently
|
|
||||||
func ConcatJSON(blobs ...[]byte) []byte {
|
|
||||||
if len(blobs) == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
if len(blobs) == 1 {
|
|
||||||
return blobs[0]
|
|
||||||
}
|
|
||||||
|
|
||||||
last := len(blobs) - 1
|
|
||||||
var opening, closing byte
|
|
||||||
a := 0
|
|
||||||
idx := 0
|
|
||||||
buf := bytes.NewBuffer(nil)
|
|
||||||
|
|
||||||
for i, b := range blobs {
|
|
||||||
if len(b) > 0 && opening == 0 { // is this an array or an object?
|
|
||||||
opening, closing = b[0], closers[b[0]]
|
|
||||||
}
|
|
||||||
|
|
||||||
if opening != '{' && opening != '[' {
|
|
||||||
continue // don't know how to concatenate non container objects
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(b) < 3 { // yep empty but also the last one, so closing this thing
|
|
||||||
if i == last && a > 0 {
|
|
||||||
buf.WriteByte(closing)
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
idx = 0
|
|
||||||
if a > 0 { // we need to join with a comma for everything beyond the first non-empty item
|
|
||||||
buf.WriteByte(comma)
|
|
||||||
idx = 1 // this is not the first or the last so we want to drop the leading bracket
|
|
||||||
}
|
|
||||||
|
|
||||||
if i != last { // not the last one, strip brackets
|
|
||||||
buf.Write(b[idx : len(b)-1])
|
|
||||||
} else { // last one, strip only the leading bracket
|
|
||||||
buf.Write(b[idx:])
|
|
||||||
}
|
|
||||||
a++
|
|
||||||
}
|
|
||||||
// somehow it ended up being empty, so provide a default value
|
|
||||||
if buf.Len() == 0 {
|
|
||||||
buf.WriteByte(opening)
|
|
||||||
buf.WriteByte(closing)
|
|
||||||
}
|
|
||||||
return buf.Bytes()
|
|
||||||
}
|
|
||||||
|
|
||||||
// ToDynamicJSON turns an object into a properly JSON typed structure
|
|
||||||
func ToDynamicJSON(data interface{}) interface{} {
|
|
||||||
// TODO: convert straight to a json typed map (mergo + iterate?)
|
|
||||||
b, _ := json.Marshal(data)
|
|
||||||
var res interface{}
|
|
||||||
json.Unmarshal(b, &res)
|
|
||||||
return res
|
|
||||||
}
|
|
||||||
|
|
||||||
// FromDynamicJSON turns an object into a properly JSON typed structure
|
|
||||||
func FromDynamicJSON(data, target interface{}) error {
|
|
||||||
b, _ := json.Marshal(data)
|
|
||||||
return json.Unmarshal(b, target)
|
|
||||||
}
|
|
||||||
|
|
// NameProvider represents an object capable of translating from go property names
// to json property names
// This type is thread-safe.
type NameProvider struct {
	lock  *sync.Mutex
	index map[reflect.Type]nameIndex
}

type nameIndex struct {
	jsonNames map[string]string
	goNames   map[string]string
}

// NewNameProvider creates a new name provider
func NewNameProvider() *NameProvider {
	return &NameProvider{
		lock:  &sync.Mutex{},
		index: make(map[reflect.Type]nameIndex),
	}
}

func buildnameIndex(tpe reflect.Type, idx, reverseIdx map[string]string) {
	for i := 0; i < tpe.NumField(); i++ {
		targetDes := tpe.Field(i)

		if targetDes.PkgPath != "" { // unexported
			continue
		}

		if targetDes.Anonymous { // walk embedded structures tree down first
			buildnameIndex(targetDes.Type, idx, reverseIdx)
			continue
		}

		if tag := targetDes.Tag.Get("json"); tag != "" {
			parts := strings.Split(tag, ",")
			if len(parts) == 0 {
				continue
			}

			nm := parts[0]
			if nm == "-" {
				continue
			}
			if nm == "" { // empty string means we want to use the Go name
				nm = targetDes.Name
			}

			idx[nm] = targetDes.Name
			reverseIdx[targetDes.Name] = nm
		}
	}
}

func newNameIndex(tpe reflect.Type) nameIndex {
	var idx = make(map[string]string, tpe.NumField())
	var reverseIdx = make(map[string]string, tpe.NumField())

	buildnameIndex(tpe, idx, reverseIdx)
	return nameIndex{jsonNames: idx, goNames: reverseIdx}
}

// GetJSONNames gets all the json property names for a type
func (n *NameProvider) GetJSONNames(subject interface{}) []string {
	tpe := reflect.Indirect(reflect.ValueOf(subject)).Type()
	names, ok := n.index[tpe]
	if !ok {
		names = n.makeNameIndex(tpe)
	}

	var res []string
	for k := range names.jsonNames {
		res = append(res, k)
	}
	return res
}

// GetJSONName gets the json name for a go property name
func (n *NameProvider) GetJSONName(subject interface{}, name string) (string, bool) {
	tpe := reflect.Indirect(reflect.ValueOf(subject)).Type()
	return n.GetJSONNameForType(tpe, name)
}

// GetJSONNameForType gets the json name for a go property name on a given type
func (n *NameProvider) GetJSONNameForType(tpe reflect.Type, name string) (string, bool) {
	names, ok := n.index[tpe]
	if !ok {
		names = n.makeNameIndex(tpe)
	}
	nme, ok := names.goNames[name]
	return nme, ok
}

func (n *NameProvider) makeNameIndex(tpe reflect.Type) nameIndex {
	n.lock.Lock()
	defer n.lock.Unlock()
	names := newNameIndex(tpe)
	n.index[tpe] = names
	return names
}

// GetGoName gets the go name for a json property name
func (n *NameProvider) GetGoName(subject interface{}, name string) (string, bool) {
	tpe := reflect.Indirect(reflect.ValueOf(subject)).Type()
	return n.GetGoNameForType(tpe, name)
}

// GetGoNameForType gets the go name for a given type for a json property name
func (n *NameProvider) GetGoNameForType(tpe reflect.Type, name string) (string, bool) {
	names, ok := n.index[tpe]
	if !ok {
		names = n.makeNameIndex(tpe)
	}
	nme, ok := names.jsonNames[name]
	return nme, ok
}
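For context, a minimal sketch of how the NameProvider API removed above is typically consumed; the Sample type and its json tags are hypothetical, not taken from this repository:

package main

import (
	"fmt"

	"github.com/go-openapi/swag"
)

// Sample is a hypothetical type used only for this sketch.
type Sample struct {
	APIVersion string `json:"apiVersion"`
	Metadata   string `json:"metadata,omitempty"`
}

func main() {
	np := swag.NewNameProvider()

	goName, ok := np.GetGoName(Sample{}, "apiVersion")  // "APIVersion", true
	jsonName, _ := np.GetJSONName(Sample{}, "Metadata") // "metadata"
	fmt.Println(goName, ok, jsonName)
	fmt.Println(np.GetJSONNames(Sample{})) // "apiVersion" and "metadata", in map order
}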
49
vendor/github.com/go-openapi/swag/loading.go
generated
vendored
@ -1,49 +0,0 @@
// Copyright 2015 go-swagger maintainers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package swag

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"strings"
)

// LoadFromFileOrHTTP loads the bytes from a file or a remote http server based on the path passed in
func LoadFromFileOrHTTP(path string) ([]byte, error) {
	return LoadStrategy(path, ioutil.ReadFile, loadHTTPBytes)(path)
}

// LoadStrategy returns a loader function for a given path or uri
func LoadStrategy(path string, local, remote func(string) ([]byte, error)) func(string) ([]byte, error) {
	if strings.HasPrefix(path, "http") {
		return remote
	}
	return local
}

func loadHTTPBytes(path string) ([]byte, error) {
	resp, err := http.Get(path)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("could not access document at %q [%s] ", path, resp.Status)
	}

	return ioutil.ReadAll(resp.Body)
}
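A short sketch of the loader strategy removed above; the remote stub and the swagger.json path are assumptions for illustration, matching the LoadStrategy signature:

package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"github.com/go-openapi/swag"
)

func main() {
	// Any path starting with "http" goes to the remote loader;
	// everything else is read from the local filesystem.
	remote := func(p string) ([]byte, error) {
		return nil, fmt.Errorf("remote loading disabled for %q", p)
	}
	load := swag.LoadStrategy("./swagger.json", ioutil.ReadFile, remote)
	b, err := load("./swagger.json")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("read %d bytes\n", len(b))
}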
24
vendor/github.com/go-openapi/swag/net.go
generated
vendored
@ -1,24 +0,0 @@
package swag

import (
	"net"
	"strconv"
)

// SplitHostPort splits a network address into a host and a port.
// The port is -1 when there is no port to be found
func SplitHostPort(addr string) (host string, port int, err error) {
	h, p, err := net.SplitHostPort(addr)
	if err != nil {
		return "", -1, err
	}
	if p == "" {
		return "", -1, &net.AddrError{Err: "missing port in address", Addr: addr}
	}

	pi, err := strconv.Atoi(p)
	if err != nil {
		return "", -1, err
	}
	return h, pi, nil
}
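Usage is a one-liner; unlike net.SplitHostPort, the port comes back as an int and an empty port is always an error. A minimal sketch:

package main

import (
	"fmt"
	"log"

	"github.com/go-openapi/swag"
)

func main() {
	host, port, err := swag.SplitHostPort("localhost:8080")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(host, port) // localhost 8080
}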
56
vendor/github.com/go-openapi/swag/path.go
generated
vendored
@ -1,56 +0,0 @@
// Copyright 2015 go-swagger maintainers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package swag

import (
	"os"
	"path/filepath"
	"runtime"
	"strings"
)

const (
	// GOPATHKey represents the env key for gopath
	GOPATHKey = "GOPATH"
)

// FindInSearchPath finds a package in a provided list of paths
func FindInSearchPath(searchPath, pkg string) string {
	pathsList := filepath.SplitList(searchPath)
	for _, path := range pathsList {
		if evaluatedPath, err := filepath.EvalSymlinks(filepath.Join(path, "src", pkg)); err == nil {
			if _, err := os.Stat(evaluatedPath); err == nil {
				return evaluatedPath
			}
		}
	}
	return ""
}

// FindInGoSearchPath finds a package in the $GOPATH:$GOROOT
func FindInGoSearchPath(pkg string) string {
	return FindInSearchPath(FullGoSearchPath(), pkg)
}

// FullGoSearchPath gets the search paths for finding packages
func FullGoSearchPath() string {
	allPaths := os.Getenv(GOPATHKey)
	if allPaths != "" {
		allPaths = strings.Join([]string{allPaths, runtime.GOROOT()}, ":")
	} else {
		allPaths = runtime.GOROOT()
	}
	return allPaths
}
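A quick sketch of the search-path helpers removed above; the package path passed in is illustrative:

package main

import (
	"fmt"

	"github.com/go-openapi/swag"
)

func main() {
	// Resolves a package directory under $GOPATH/src (falling back to
	// $GOROOT/src), following symlinks; "" means not found.
	fmt.Println(swag.FindInGoSearchPath("github.com/go-openapi/swag"))
}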
318
vendor/github.com/go-openapi/swag/util.go
generated
vendored
@ -1,318 +0,0 @@
// Copyright 2015 go-swagger maintainers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package swag

import (
	"math"
	"reflect"
	"regexp"
	"sort"
	"strings"
)

// Taken from https://github.com/golang/lint/blob/1fab560e16097e5b69afb66eb93aab843ef77845/lint.go#L663-L698
var commonInitialisms = map[string]bool{
	"API":   true,
	"ASCII": true,
	"CPU":   true,
	"CSS":   true,
	"DNS":   true,
	"EOF":   true,
	"GUID":  true,
	"HTML":  true,
	"HTTPS": true,
	"HTTP":  true,
	"ID":    true,
	"IP":    true,
	"JSON":  true,
	"LHS":   true,
	"QPS":   true,
	"RAM":   true,
	"RHS":   true,
	"RPC":   true,
	"SLA":   true,
	"SMTP":  true,
	"SSH":   true,
	"TCP":   true,
	"TLS":   true,
	"TTL":   true,
	"UDP":   true,
	"UUID":  true,
	"UID":   true,
	"UI":    true,
	"URI":   true,
	"URL":   true,
	"UTF8":  true,
	"VM":    true,
	"XML":   true,
	"XSRF":  true,
	"XSS":   true,
}
var initialisms []string

func init() {
	for k := range commonInitialisms {
		initialisms = append(initialisms, k)
	}
	sort.Sort(sort.Reverse(byLength(initialisms)))
}

// JoinByFormat joins a string array by a known format:
//	ssv: space separated value
//	tsv: tab separated value
//	pipes: pipe (|) separated value
//	csv: comma separated value (default)
func JoinByFormat(data []string, format string) []string {
	if len(data) == 0 {
		return data
	}
	var sep string
	switch format {
	case "ssv":
		sep = " "
	case "tsv":
		sep = "\t"
	case "pipes":
		sep = "|"
	case "multi":
		return data
	default:
		sep = ","
	}
	return []string{strings.Join(data, sep)}
}

// SplitByFormat splits a string by a known format:
//	ssv: space separated value
//	tsv: tab separated value
//	pipes: pipe (|) separated value
//	csv: comma separated value (default)
func SplitByFormat(data, format string) []string {
	if data == "" {
		return nil
	}
	var sep string
	switch format {
	case "ssv":
		sep = " "
	case "tsv":
		sep = "\t"
	case "pipes":
		sep = "|"
	case "multi":
		return nil
	default:
		sep = ","
	}
	var result []string
	for _, s := range strings.Split(data, sep) {
		if ts := strings.TrimSpace(s); ts != "" {
			result = append(result, ts)
		}
	}
	return result
}

type byLength []string

func (s byLength) Len() int {
	return len(s)
}
func (s byLength) Swap(i, j int) {
	s[i], s[j] = s[j], s[i]
}
func (s byLength) Less(i, j int) bool {
	return len(s[i]) < len(s[j])
}

// Prepares strings by splitting by caps, spaces, dashes, and underscores
func split(str string) (words []string) {
	repl := strings.NewReplacer(
		"@", "At ",
		"&", "And ",
		"|", "Pipe ",
		"$", "Dollar ",
		"!", "Bang ",
		"-", " ",
		"_", " ",
	)

	rex1 := regexp.MustCompile(`(\p{Lu})`)
	rex2 := regexp.MustCompile(`(\pL|\pM|\pN|\p{Pc})+`)

	str = trim(str)

	// Convert dash and underscore to spaces
	str = repl.Replace(str)

	// Split when uppercase is found (needed for Snake)
	str = rex1.ReplaceAllString(str, " $1")
	// check if consecutive single char things make up an initialism

	for _, k := range initialisms {
		str = strings.Replace(str, rex1.ReplaceAllString(k, " $1"), " "+k, -1)
	}
	// Get the final list of words
	words = rex2.FindAllString(str, -1)

	return
}

// Removes leading and trailing whitespace
func trim(str string) string {
	return strings.Trim(str, " ")
}

// Shortcut to strings.ToUpper()
func upper(str string) string {
	return strings.ToUpper(trim(str))
}

// Shortcut to strings.ToLower()
func lower(str string) string {
	return strings.ToLower(trim(str))
}

// ToFileName lowercases and underscores a go type name
func ToFileName(name string) string {
	var out []string
	for _, w := range split(name) {
		out = append(out, lower(w))
	}
	return strings.Join(out, "_")
}

// ToCommandName lowercases and hyphenates a go type name
func ToCommandName(name string) string {
	var out []string
	for _, w := range split(name) {
		out = append(out, lower(w))
	}
	return strings.Join(out, "-")
}

// ToHumanNameLower represents a code name as a human series of words
func ToHumanNameLower(name string) string {
	var out []string
	for _, w := range split(name) {
		if !commonInitialisms[upper(w)] {
			out = append(out, lower(w))
		} else {
			out = append(out, w)
		}
	}
	return strings.Join(out, " ")
}

// ToHumanNameTitle represents a code name as a human series of words with the first letters titleized
func ToHumanNameTitle(name string) string {
	var out []string
	for _, w := range split(name) {
		uw := upper(w)
		if !commonInitialisms[uw] {
			out = append(out, upper(w[:1])+lower(w[1:]))
		} else {
			out = append(out, w)
		}
	}
	return strings.Join(out, " ")
}

// ToJSONName camelcases a name which can be underscored or pascal cased
func ToJSONName(name string) string {
	var out []string
	for i, w := range split(name) {
		if i == 0 {
			out = append(out, lower(w))
			continue
		}
		out = append(out, upper(w[:1])+lower(w[1:]))
	}
	return strings.Join(out, "")
}

// ToVarName camelcases a name which can be underscored or pascal cased
func ToVarName(name string) string {
	res := ToGoName(name)
	if len(res) <= 1 {
		return lower(res)
	}
	return lower(res[:1]) + res[1:]
}

// ToGoName translates a swagger name which can be underscored or camel cased to a name that golint likes
func ToGoName(name string) string {
	var out []string
	for _, w := range split(name) {
		uw := upper(w)
		mod := int(math.Min(float64(len(uw)), 2))
		if !commonInitialisms[uw] && !commonInitialisms[uw[:len(uw)-mod]] {
			uw = upper(w[:1]) + lower(w[1:])
		}
		out = append(out, uw)
	}
	return strings.Join(out, "")
}

// ContainsStringsCI searches a slice of strings for a case-insensitive match
func ContainsStringsCI(coll []string, item string) bool {
	for _, a := range coll {
		if strings.EqualFold(a, item) {
			return true
		}
	}
	return false
}

type zeroable interface {
	IsZero() bool
}

// IsZero returns true when the value passed into the function is a zero value.
// This allows for safer checking of interface values.
func IsZero(data interface{}) bool {
	// check for things that have an IsZero method instead
	if vv, ok := data.(zeroable); ok {
		return vv.IsZero()
	}
	// continue with slightly more complex reflection
	v := reflect.ValueOf(data)
	switch v.Kind() {
	case reflect.String:
		return v.Len() == 0
	case reflect.Bool:
		return !v.Bool()
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return v.Int() == 0
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		return v.Uint() == 0
	case reflect.Float32, reflect.Float64:
		return v.Float() == 0
	case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
		return v.IsNil()
	case reflect.Struct, reflect.Array:
		return reflect.DeepEqual(data, reflect.Zero(v.Type()).Interface())
	case reflect.Invalid:
		return true
	}
	return false
}

// CommandLineOptionsGroup represents a group of user-defined command line options
type CommandLineOptionsGroup struct {
	ShortDescription string
	LongDescription  string
	Options          interface{}
}
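The naming helpers removed above are easiest to see by example; the outputs in the comments follow from the split/initialism logic shown and are illustrative:

package main

import (
	"fmt"

	"github.com/go-openapi/swag"
)

func main() {
	fmt.Println(swag.ToGoName("sample_id"))          // SampleID ("ID" is a known initialism)
	fmt.Println(swag.ToVarName("sample_id"))         // sampleID
	fmt.Println(swag.ToJSONName("sample_id"))        // sampleId
	fmt.Println(swag.ToFileName("SampleText"))       // sample_text
	fmt.Println(swag.ToCommandName("SampleText"))    // sample-text
	fmt.Println(swag.ToHumanNameLower("SampleText")) // sample text
}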
202
vendor/github.com/google/btree/LICENSE
generated
vendored
@ -1,202 +0,0 @@

                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "[]"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright [yyyy] [name of copyright owner]

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
12
vendor/github.com/google/btree/README.md
generated
vendored
@ -1,12 +0,0 @@
# BTree implementation for Go

This package provides an in-memory B-Tree implementation for Go, useful as
an ordered, mutable data structure.

The API is based off of the wonderful
http://godoc.org/github.com/petar/GoLLRB/llrb, and is meant to allow btree to
act as a drop-in replacement for gollrb trees.

See http://godoc.org/github.com/google/btree for documentation.
649
vendor/github.com/google/btree/btree.go
generated
vendored
@ -1,649 +0,0 @@
// Copyright 2014 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package btree implements in-memory B-Trees of arbitrary degree.
//
// btree implements an in-memory B-Tree for use as an ordered data structure.
// It is not meant for persistent storage solutions.
//
// It has a flatter structure than an equivalent red-black or other binary tree,
// which in some cases yields better memory usage and/or performance.
// See some discussion on the matter here:
//   http://google-opensource.blogspot.com/2013/01/c-containers-that-save-memory-and-time.html
// Note, though, that this project is in no way related to the C++ B-Tree
// implementation written about there.
//
// Within this tree, each node contains a slice of items and a (possibly nil)
// slice of children. For basic numeric values or raw structs, this can cause
// efficiency differences when compared to equivalent C++ template code that
// stores values in arrays within the node:
//   * Due to the overhead of storing values as interfaces (each
//     value needs to be stored as the value itself, then 2 words for the
//     interface pointing to that value and its type), resulting in higher
//     memory use.
//   * Since interfaces can point to values anywhere in memory, values are
//     most likely not stored in contiguous blocks, resulting in a higher
//     number of cache misses.
// These issues don't tend to matter, though, when working with strings or other
// heap-allocated structures, since C++-equivalent structures also must store
// pointers and also distribute their values across the heap.
//
// This implementation is designed to be a drop-in replacement to gollrb.LLRB
// trees, (http://github.com/petar/gollrb), an excellent and probably the most
// widely used ordered tree implementation in the Go ecosystem currently.
// Its functions, therefore, exactly mirror those of
// llrb.LLRB where possible. Unlike gollrb, though, we currently don't
// support storing multiple equivalent values or backwards iteration.
package btree

import (
	"fmt"
	"io"
	"sort"
	"strings"
)

// Item represents a single object in the tree.
type Item interface {
	// Less tests whether the current item is less than the given argument.
	//
	// This must provide a strict weak ordering.
	// If !a.Less(b) && !b.Less(a), we treat this to mean a == b (i.e. we can only
	// hold one of either a or b in the tree).
	Less(than Item) bool
}

const (
	// DefaultFreeListSize is the default capacity of a node free list.
	DefaultFreeListSize = 32
)

// FreeList represents a free list of btree nodes. By default each
// BTree has its own FreeList, but multiple BTrees can share the same
// FreeList.
// Two Btrees using the same freelist are not safe for concurrent write access.
type FreeList struct {
	freelist []*node
}

// NewFreeList creates a new free list.
// size is the maximum size of the returned free list.
func NewFreeList(size int) *FreeList {
	return &FreeList{freelist: make([]*node, 0, size)}
}

func (f *FreeList) newNode() (n *node) {
	index := len(f.freelist) - 1
	if index < 0 {
		return new(node)
	}
	f.freelist, n = f.freelist[:index], f.freelist[index]
	return
}

func (f *FreeList) freeNode(n *node) {
	if len(f.freelist) < cap(f.freelist) {
		f.freelist = append(f.freelist, n)
	}
}

// ItemIterator allows callers of Ascend* to iterate in-order over portions of
// the tree. When this function returns false, iteration will stop and the
// associated Ascend* function will immediately return.
type ItemIterator func(i Item) bool

// New creates a new B-Tree with the given degree.
//
// New(2), for example, will create a 2-3-4 tree (each node contains 1-3 items
// and 2-4 children).
func New(degree int) *BTree {
	return NewWithFreeList(degree, NewFreeList(DefaultFreeListSize))
}

// NewWithFreeList creates a new B-Tree that uses the given node free list.
func NewWithFreeList(degree int, f *FreeList) *BTree {
	if degree <= 1 {
		panic("bad degree")
	}
	return &BTree{
		degree:   degree,
		freelist: f,
	}
}

// items stores items in a node.
type items []Item

// insertAt inserts a value into the given index, pushing all subsequent values
// forward.
func (s *items) insertAt(index int, item Item) {
	*s = append(*s, nil)
	if index < len(*s) {
		copy((*s)[index+1:], (*s)[index:])
	}
	(*s)[index] = item
}

// removeAt removes a value at a given index, pulling all subsequent values
// back.
func (s *items) removeAt(index int) Item {
	item := (*s)[index]
	(*s)[index] = nil
	copy((*s)[index:], (*s)[index+1:])
	*s = (*s)[:len(*s)-1]
	return item
}

// pop removes and returns the last element in the list.
func (s *items) pop() (out Item) {
	index := len(*s) - 1
	out = (*s)[index]
	(*s)[index] = nil
	*s = (*s)[:index]
	return
}

// find returns the index where the given item should be inserted into this
// list. 'found' is true if the item already exists in the list at the given
// index.
func (s items) find(item Item) (index int, found bool) {
	i := sort.Search(len(s), func(i int) bool {
		return item.Less(s[i])
	})
	if i > 0 && !s[i-1].Less(item) {
		return i - 1, true
	}
	return i, false
}

// children stores child nodes in a node.
type children []*node

// insertAt inserts a value into the given index, pushing all subsequent values
// forward.
func (s *children) insertAt(index int, n *node) {
	*s = append(*s, nil)
	if index < len(*s) {
		copy((*s)[index+1:], (*s)[index:])
	}
	(*s)[index] = n
}

// removeAt removes a value at a given index, pulling all subsequent values
// back.
func (s *children) removeAt(index int) *node {
	n := (*s)[index]
	(*s)[index] = nil
	copy((*s)[index:], (*s)[index+1:])
	*s = (*s)[:len(*s)-1]
	return n
}

// pop removes and returns the last element in the list.
func (s *children) pop() (out *node) {
	index := len(*s) - 1
	out = (*s)[index]
	(*s)[index] = nil
	*s = (*s)[:index]
	return
}

// node is an internal node in a tree.
//
// It must at all times maintain the invariant that either
//   * len(children) == 0, len(items) unconstrained
//   * len(children) == len(items) + 1
type node struct {
	items    items
	children children
	t        *BTree
}

// split splits the given node at the given index. The current node shrinks,
// and this function returns the item that existed at that index and a new node
// containing all items/children after it.
func (n *node) split(i int) (Item, *node) {
	item := n.items[i]
	next := n.t.newNode()
	next.items = append(next.items, n.items[i+1:]...)
	n.items = n.items[:i]
	if len(n.children) > 0 {
		next.children = append(next.children, n.children[i+1:]...)
		n.children = n.children[:i+1]
	}
	return item, next
}

// maybeSplitChild checks if a child should be split, and if so splits it.
// Returns whether or not a split occurred.
func (n *node) maybeSplitChild(i, maxItems int) bool {
	if len(n.children[i].items) < maxItems {
		return false
	}
	first := n.children[i]
	item, second := first.split(maxItems / 2)
	n.items.insertAt(i, item)
	n.children.insertAt(i+1, second)
	return true
}

// insert inserts an item into the subtree rooted at this node, making sure
// no nodes in the subtree exceed maxItems items. Should an equivalent item
// be found/replaced by insert, it will be returned.
func (n *node) insert(item Item, maxItems int) Item {
	i, found := n.items.find(item)
	if found {
		out := n.items[i]
		n.items[i] = item
		return out
	}
	if len(n.children) == 0 {
		n.items.insertAt(i, item)
		return nil
	}
	if n.maybeSplitChild(i, maxItems) {
		inTree := n.items[i]
		switch {
		case item.Less(inTree):
			// no change, we want first split node
		case inTree.Less(item):
			i++ // we want second split node
		default:
			out := n.items[i]
			n.items[i] = item
			return out
		}
	}
	return n.children[i].insert(item, maxItems)
}

// get finds the given key in the subtree and returns it.
func (n *node) get(key Item) Item {
	i, found := n.items.find(key)
	if found {
		return n.items[i]
	} else if len(n.children) > 0 {
		return n.children[i].get(key)
	}
	return nil
}

// min returns the first item in the subtree.
func min(n *node) Item {
	if n == nil {
		return nil
	}
	for len(n.children) > 0 {
		n = n.children[0]
	}
	if len(n.items) == 0 {
		return nil
	}
	return n.items[0]
}

// max returns the last item in the subtree.
func max(n *node) Item {
	if n == nil {
		return nil
	}
	for len(n.children) > 0 {
		n = n.children[len(n.children)-1]
	}
	if len(n.items) == 0 {
		return nil
	}
	return n.items[len(n.items)-1]
}

// toRemove details what item to remove in a node.remove call.
type toRemove int

const (
	removeItem toRemove = iota // removes the given item
	removeMin                  // removes smallest item in the subtree
	removeMax                  // removes largest item in the subtree
)

// remove removes an item from the subtree rooted at this node.
func (n *node) remove(item Item, minItems int, typ toRemove) Item {
	var i int
	var found bool
	switch typ {
	case removeMax:
		if len(n.children) == 0 {
			return n.items.pop()
		}
		i = len(n.items)
	case removeMin:
		if len(n.children) == 0 {
			return n.items.removeAt(0)
		}
		i = 0
	case removeItem:
		i, found = n.items.find(item)
		if len(n.children) == 0 {
			if found {
				return n.items.removeAt(i)
			}
			return nil
		}
	default:
		panic("invalid type")
	}
	// If we get to here, we have children.
	child := n.children[i]
	if len(child.items) <= minItems {
		return n.growChildAndRemove(i, item, minItems, typ)
	}
	// Either we had enough items to begin with, or we've done some
	// merging/stealing, because we've got enough now and we're ready to return
	// stuff.
	if found {
		// The item exists at index 'i', and the child we've selected can give us a
		// predecessor, since if we've gotten here it's got > minItems items in it.
		out := n.items[i]
		// We use our special-case 'remove' call with typ=maxItem to pull the
		// predecessor of item i (the rightmost leaf of our immediate left child)
		// and set it into where we pulled the item from.
		n.items[i] = child.remove(nil, minItems, removeMax)
		return out
	}
	// Final recursive call. Once we're here, we know that the item isn't in this
	// node and that the child is big enough to remove from.
	return child.remove(item, minItems, typ)
}

// growChildAndRemove grows child 'i' to make sure it's possible to remove an
// item from it while keeping it at minItems, then calls remove to actually
// remove it.
//
// Most documentation says we have to do two sets of special casing:
//   1) item is in this node
//   2) item is in child
// In both cases, we need to handle the two subcases:
//   A) node has enough values that it can spare one
//   B) node doesn't have enough values
// For the latter, we have to check:
//   a) left sibling has node to spare
//   b) right sibling has node to spare
//   c) we must merge
// To simplify our code here, we handle cases #1 and #2 the same:
// If a node doesn't have enough items, we make sure it does (using a,b,c).
// We then simply redo our remove call, and the second time (regardless of
// whether we're in case 1 or 2), we'll have enough items and can guarantee
// that we hit case A.
func (n *node) growChildAndRemove(i int, item Item, minItems int, typ toRemove) Item {
	child := n.children[i]
	if i > 0 && len(n.children[i-1].items) > minItems {
		// Steal from left child
		stealFrom := n.children[i-1]
		stolenItem := stealFrom.items.pop()
		child.items.insertAt(0, n.items[i-1])
		n.items[i-1] = stolenItem
		if len(stealFrom.children) > 0 {
			child.children.insertAt(0, stealFrom.children.pop())
		}
	} else if i < len(n.items) && len(n.children[i+1].items) > minItems {
		// steal from right child
		stealFrom := n.children[i+1]
		stolenItem := stealFrom.items.removeAt(0)
		child.items = append(child.items, n.items[i])
		n.items[i] = stolenItem
		if len(stealFrom.children) > 0 {
			child.children = append(child.children, stealFrom.children.removeAt(0))
		}
	} else {
		if i >= len(n.items) {
			i--
			child = n.children[i]
		}
		// merge with right child
		mergeItem := n.items.removeAt(i)
		mergeChild := n.children.removeAt(i + 1)
		child.items = append(child.items, mergeItem)
		child.items = append(child.items, mergeChild.items...)
		child.children = append(child.children, mergeChild.children...)
		n.t.freeNode(mergeChild)
	}
	return n.remove(item, minItems, typ)
}

// iterate provides a simple method for iterating over elements in the tree.
// It could probably use some work to be extra-efficient (it calls from() a
// little more than it should), but it works pretty well for now.
//
// It requires that 'from' and 'to' both return true for values we should hit
// with the iterator. It should also be the case that 'from' returns true for
// values less than or equal to values 'to' returns true for, and 'to'
// returns true for values greater than or equal to those that 'from'
// does.
func (n *node) iterate(from, to func(Item) bool, iter ItemIterator) bool {
	for i, item := range n.items {
		if !from(item) {
			continue
		}
		if len(n.children) > 0 && !n.children[i].iterate(from, to, iter) {
			return false
		}
		if !to(item) {
			return false
		}
		if !iter(item) {
			return false
		}
	}
	if len(n.children) > 0 {
		return n.children[len(n.children)-1].iterate(from, to, iter)
	}
	return true
}

// Used for testing/debugging purposes.
func (n *node) print(w io.Writer, level int) {
	fmt.Fprintf(w, "%sNODE:%v\n", strings.Repeat(" ", level), n.items)
	for _, c := range n.children {
		c.print(w, level+1)
	}
}

// BTree is an implementation of a B-Tree.
//
// BTree stores Item instances in an ordered structure, allowing easy insertion,
// removal, and iteration.
//
// Write operations are not safe for concurrent mutation by multiple
// goroutines, but Read operations are.
type BTree struct {
	degree   int
	length   int
	root     *node
	freelist *FreeList
}

// maxItems returns the max number of items to allow per node.
func (t *BTree) maxItems() int {
	return t.degree*2 - 1
}

// minItems returns the min number of items to allow per node (ignored for the
// root node).
func (t *BTree) minItems() int {
	return t.degree - 1
}

func (t *BTree) newNode() (n *node) {
	n = t.freelist.newNode()
	n.t = t
	return
}

func (t *BTree) freeNode(n *node) {
	for i := range n.items {
		n.items[i] = nil // clear to allow GC
	}
	n.items = n.items[:0]
	for i := range n.children {
		n.children[i] = nil // clear to allow GC
	}
	n.children = n.children[:0]
	n.t = nil // clear to allow GC
	t.freelist.freeNode(n)
}

// ReplaceOrInsert adds the given item to the tree. If an item in the tree
// already equals the given one, it is removed from the tree and returned.
// Otherwise, nil is returned.
//
// nil cannot be added to the tree (will panic).
func (t *BTree) ReplaceOrInsert(item Item) Item {
	if item == nil {
		panic("nil item being added to BTree")
	}
	if t.root == nil {
		t.root = t.newNode()
		t.root.items = append(t.root.items, item)
		t.length++
		return nil
	} else if len(t.root.items) >= t.maxItems() {
		item2, second := t.root.split(t.maxItems() / 2)
		oldroot := t.root
		t.root = t.newNode()
		t.root.items = append(t.root.items, item2)
		t.root.children = append(t.root.children, oldroot, second)
	}
	out := t.root.insert(item, t.maxItems())
	if out == nil {
		t.length++
	}
	return out
}

// Delete removes an item equal to the passed in item from the tree, returning
// it. If no such item exists, returns nil.
func (t *BTree) Delete(item Item) Item {
	return t.deleteItem(item, removeItem)
}

// DeleteMin removes the smallest item in the tree and returns it.
// If no such item exists, returns nil.
func (t *BTree) DeleteMin() Item {
	return t.deleteItem(nil, removeMin)
}

// DeleteMax removes the largest item in the tree and returns it.
// If no such item exists, returns nil.
func (t *BTree) DeleteMax() Item {
	return t.deleteItem(nil, removeMax)
}

func (t *BTree) deleteItem(item Item, typ toRemove) Item {
	if t.root == nil || len(t.root.items) == 0 {
		return nil
	}
	out := t.root.remove(item, t.minItems(), typ)
	if len(t.root.items) == 0 && len(t.root.children) > 0 {
		oldroot := t.root
		t.root = t.root.children[0]
		t.freeNode(oldroot)
	}
	if out != nil {
		t.length--
	}
	return out
}

// AscendRange calls the iterator for every value in the tree within the range
// [greaterOrEqual, lessThan), until iterator returns false.
func (t *BTree) AscendRange(greaterOrEqual, lessThan Item, iterator ItemIterator) {
	if t.root == nil {
		return
	}
	t.root.iterate(
		func(a Item) bool { return !a.Less(greaterOrEqual) },
		func(a Item) bool { return a.Less(lessThan) },
		iterator)
}

// AscendLessThan calls the iterator for every value in the tree within the range
// [first, pivot), until iterator returns false.
func (t *BTree) AscendLessThan(pivot Item, iterator ItemIterator) {
	if t.root == nil {
		return
	}
	t.root.iterate(
		func(a Item) bool { return true },
		func(a Item) bool { return a.Less(pivot) },
		iterator)
}

// AscendGreaterOrEqual calls the iterator for every value in the tree within
// the range [pivot, last], until iterator returns false.
func (t *BTree) AscendGreaterOrEqual(pivot Item, iterator ItemIterator) {
	if t.root == nil {
		return
	}
	t.root.iterate(
		func(a Item) bool { return !a.Less(pivot) },
		func(a Item) bool { return true },
		iterator)
}

// Ascend calls the iterator for every value in the tree within the range
// [first, last], until iterator returns false.
func (t *BTree) Ascend(iterator ItemIterator) {
	if t.root == nil {
		return
	}
	t.root.iterate(
		func(a Item) bool { return true },
		func(a Item) bool { return true },
		iterator)
}

// Get looks for the key item in the tree, returning it. It returns nil if
// unable to find that item.
func (t *BTree) Get(key Item) Item {
	if t.root == nil {
		return nil
	}
	return t.root.get(key)
}

// Min returns the smallest item in the tree, or nil if the tree is empty.
func (t *BTree) Min() Item {
	return min(t.root)
}

// Max returns the largest item in the tree, or nil if the tree is empty.
func (t *BTree) Max() Item {
	return max(t.root)
}

// Has returns true if the given key is in the tree.
func (t *BTree) Has(key Item) bool {
	return t.Get(key) != nil
}

// Len returns the number of items currently in the tree.
func (t *BTree) Len() int {
	return t.length
}

// Int implements the Item interface for integers.
type Int int

// Less returns true if int(a) < int(b).
func (a Int) Less(b Item) bool {
	return a < b.(Int)
}
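A minimal sketch of the btree API being dropped here, using the package's own Int item type:

package main

import (
	"fmt"

	"github.com/google/btree"
)

func main() {
	tr := btree.New(32) // degree 32: up to 63 items per node
	for i := 0; i < 10; i++ {
		tr.ReplaceOrInsert(btree.Int(i))
	}
	fmt.Println(tr.Len(), tr.Min(), tr.Max()) // 10 0 9
	fmt.Println(tr.Has(btree.Int(3)))         // true

	// Visits 2, 3, 4: the range is [greaterOrEqual, lessThan).
	tr.AscendRange(btree.Int(2), btree.Int(5), func(i btree.Item) bool {
		fmt.Println(i)
		return true
	})
}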
7
vendor/github.com/gregjones/httpcache/LICENSE.txt
generated
vendored
@ -1,7 +0,0 @@
Copyright © 2012 Greg Jones (greg.jones@gmail.com)

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24
vendor/github.com/gregjones/httpcache/README.md
generated
vendored
@ -1,24 +0,0 @@
httpcache
=========

[Build Status](https://travis-ci.org/gregjones/httpcache) [GoDoc](https://godoc.org/github.com/gregjones/httpcache)

Package httpcache provides a http.RoundTripper implementation that works as a mostly RFC-compliant cache for http responses.

It is only suitable for use as a 'private' cache (i.e. for a web-browser or an API-client and not for a shared proxy).

Cache Backends
--------------

- The built-in 'memory' cache stores responses in an in-memory map.
- [`github.com/gregjones/httpcache/diskcache`](https://github.com/gregjones/httpcache/tree/master/diskcache) provides a filesystem-backed cache using the [diskv](https://github.com/peterbourgon/diskv) library.
- [`github.com/gregjones/httpcache/memcache`](https://github.com/gregjones/httpcache/tree/master/memcache) provides memcache implementations, for both App Engine and 'normal' memcache servers.
- [`sourcegraph.com/sourcegraph/s3cache`](https://sourcegraph.com/github.com/sourcegraph/s3cache) uses Amazon S3 for storage.
- [`github.com/gregjones/httpcache/leveldbcache`](https://github.com/gregjones/httpcache/tree/master/leveldbcache) provides a filesystem-backed cache using [leveldb](https://github.com/syndtr/goleveldb/leveldb).
- [`github.com/die-net/lrucache`](https://github.com/die-net/lrucache) provides an in-memory cache that will evict least-recently used entries.
- [`github.com/die-net/lrucache/twotier`](https://github.com/die-net/lrucache/tree/master/twotier) allows caches to be combined, for example to use lrucache above with a persistent disk-cache.

License
-------

- [MIT License](LICENSE.txt)
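A minimal sketch of how this package was typically wired up, using only the exported API that appears in the deleted sources below (the target URL is illustrative):

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/gregjones/httpcache"
)

func main() {
	// NewMemoryCacheTransport wraps http.DefaultTransport with an
	// in-memory cache; Client returns an *http.Client that uses it.
	client := httpcache.NewMemoryCacheTransport().Client()

	for i := 0; i < 2; i++ {
		resp, err := client.Get("https://example.com/")
		if err != nil {
			panic(err)
		}
		resp.Body.Close()
		// Provided the response is cacheable, the second hit is served
		// from the cache and carries the X-From-Cache: 1 header.
		fmt.Println(resp.Header.Get(httpcache.XFromCache))
	}
}
```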
61
vendor/github.com/gregjones/httpcache/diskcache/diskcache.go
generated
vendored
@ -1,61 +0,0 @@
// Package diskcache provides an implementation of httpcache.Cache that uses the diskv package
// to supplement an in-memory map with persistent storage
//
package diskcache

import (
	"bytes"
	"crypto/md5"
	"encoding/hex"
	"github.com/peterbourgon/diskv"
	"io"
)

// Cache is an implementation of httpcache.Cache that supplements the in-memory map with persistent storage
type Cache struct {
	d *diskv.Diskv
}

// Get returns the response corresponding to key if present
func (c *Cache) Get(key string) (resp []byte, ok bool) {
	key = keyToFilename(key)
	resp, err := c.d.Read(key)
	if err != nil {
		return []byte{}, false
	}
	return resp, true
}

// Set saves a response to the cache as key
func (c *Cache) Set(key string, resp []byte) {
	key = keyToFilename(key)
	c.d.WriteStream(key, bytes.NewReader(resp), true)
}

// Delete removes the response with key from the cache
func (c *Cache) Delete(key string) {
	key = keyToFilename(key)
	c.d.Erase(key)
}

func keyToFilename(key string) string {
	h := md5.New()
	io.WriteString(h, key)
	return hex.EncodeToString(h.Sum(nil))
}

// New returns a new Cache that will store files in basePath
func New(basePath string) *Cache {
	return &Cache{
		d: diskv.New(diskv.Options{
			BasePath:     basePath,
			CacheSizeMax: 100 * 1024 * 1024, // 100MB
		}),
	}
}

// NewWithDiskv returns a new Cache using the provided Diskv as underlying
// storage.
func NewWithDiskv(d *diskv.Diskv) *Cache {
	return &Cache{d}
}
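A sketch of combining diskcache with httpcache, using the two constructors shown above (the cache directory path is illustrative):

```go
package main

import (
	"net/http"

	"github.com/gregjones/httpcache"
	"github.com/gregjones/httpcache/diskcache"
)

func main() {
	// Back the HTTP cache with a diskv-based store rooted at /tmp/httpcache.
	cache := diskcache.New("/tmp/httpcache")
	transport := httpcache.NewTransport(cache)

	client := &http.Client{Transport: transport}
	resp, err := client.Get("https://example.com/")
	if err != nil {
		panic(err)
	}
	resp.Body.Close()
}
```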
553
vendor/github.com/gregjones/httpcache/httpcache.go
generated
vendored
@ -1,553 +0,0 @@
// Package httpcache provides a http.RoundTripper implementation that works as a
// mostly RFC-compliant cache for http responses.
//
// It is only suitable for use as a 'private' cache (i.e. for a web-browser or an API-client
// and not for a shared proxy).
//
package httpcache

import (
	"bufio"
	"bytes"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
	"net/http/httputil"
	"strings"
	"sync"
	"time"
)

const (
	stale = iota
	fresh
	transparent
	// XFromCache is the header added to responses that are returned from the cache
	XFromCache = "X-From-Cache"
)

// A Cache interface is used by the Transport to store and retrieve responses.
type Cache interface {
	// Get returns the []byte representation of a cached response and a bool
	// set to true if the value isn't empty
	Get(key string) (responseBytes []byte, ok bool)
	// Set stores the []byte representation of a response against a key
	Set(key string, responseBytes []byte)
	// Delete removes the value associated with the key
	Delete(key string)
}

// cacheKey returns the cache key for req.
func cacheKey(req *http.Request) string {
	return req.URL.String()
}

// CachedResponse returns the cached http.Response for req if present, and nil
// otherwise.
func CachedResponse(c Cache, req *http.Request) (resp *http.Response, err error) {
	cachedVal, ok := c.Get(cacheKey(req))
	if !ok {
		return
	}

	b := bytes.NewBuffer(cachedVal)
	return http.ReadResponse(bufio.NewReader(b), req)
}

// MemoryCache is an implementation of Cache that stores responses in an in-memory map.
type MemoryCache struct {
	mu    sync.RWMutex
	items map[string][]byte
}

// Get returns the []byte representation of the response and true if present, false if not
func (c *MemoryCache) Get(key string) (resp []byte, ok bool) {
	c.mu.RLock()
	resp, ok = c.items[key]
	c.mu.RUnlock()
	return resp, ok
}

// Set saves response resp to the cache with key
func (c *MemoryCache) Set(key string, resp []byte) {
	c.mu.Lock()
	c.items[key] = resp
	c.mu.Unlock()
}

// Delete removes key from the cache
func (c *MemoryCache) Delete(key string) {
	c.mu.Lock()
	delete(c.items, key)
	c.mu.Unlock()
}

// NewMemoryCache returns a new Cache that will store items in an in-memory map
func NewMemoryCache() *MemoryCache {
	c := &MemoryCache{items: map[string][]byte{}}
	return c
}

// Transport is an implementation of http.RoundTripper that will return values from a cache
// where possible (avoiding a network request) and will additionally add validators (etag/if-modified-since)
// to repeated requests allowing servers to return 304 / Not Modified
type Transport struct {
	// The RoundTripper interface actually used to make requests
	// If nil, http.DefaultTransport is used
	Transport http.RoundTripper
	Cache     Cache
	// If true, responses returned from the cache will be given an extra header, X-From-Cache
	MarkCachedResponses bool
}

// NewTransport returns a new Transport with the
// provided Cache implementation and MarkCachedResponses set to true
func NewTransport(c Cache) *Transport {
	return &Transport{Cache: c, MarkCachedResponses: true}
}

// Client returns an *http.Client that caches responses.
func (t *Transport) Client() *http.Client {
	return &http.Client{Transport: t}
}

// varyMatches will return false unless all of the cached values for the headers listed in Vary
// match the new request
func varyMatches(cachedResp *http.Response, req *http.Request) bool {
	for _, header := range headerAllCommaSepValues(cachedResp.Header, "vary") {
		header = http.CanonicalHeaderKey(header)
		if header != "" && req.Header.Get(header) != cachedResp.Header.Get("X-Varied-"+header) {
			return false
		}
	}
	return true
}

// RoundTrip takes a Request and returns a Response
//
// If there is a fresh Response already in cache, then it will be returned without connecting to
// the server.
//
// If there is a stale Response, then any validators it contains will be set on the new request
// to give the server a chance to respond with NotModified. If this happens, then the cached Response
// will be returned.
func (t *Transport) RoundTrip(req *http.Request) (resp *http.Response, err error) {
	cacheKey := cacheKey(req)
	cacheable := (req.Method == "GET" || req.Method == "HEAD") && req.Header.Get("range") == ""
	var cachedResp *http.Response
	if cacheable {
		cachedResp, err = CachedResponse(t.Cache, req)
	} else {
		// Need to invalidate an existing value
		t.Cache.Delete(cacheKey)
	}

	transport := t.Transport
	if transport == nil {
		transport = http.DefaultTransport
	}

	if cacheable && cachedResp != nil && err == nil {
		if t.MarkCachedResponses {
			cachedResp.Header.Set(XFromCache, "1")
		}

		if varyMatches(cachedResp, req) {
			// Can only use cached value if the new request doesn't Vary significantly
			freshness := getFreshness(cachedResp.Header, req.Header)
			if freshness == fresh {
				return cachedResp, nil
			}

			if freshness == stale {
				var req2 *http.Request
				// Add validators if caller hasn't already done so
				etag := cachedResp.Header.Get("etag")
				if etag != "" && req.Header.Get("etag") == "" {
					req2 = cloneRequest(req)
					req2.Header.Set("if-none-match", etag)
				}
				lastModified := cachedResp.Header.Get("last-modified")
				if lastModified != "" && req.Header.Get("last-modified") == "" {
					if req2 == nil {
						req2 = cloneRequest(req)
					}
					req2.Header.Set("if-modified-since", lastModified)
				}
				if req2 != nil {
					req = req2
				}
			}
		}

		resp, err = transport.RoundTrip(req)
		if err == nil && req.Method == "GET" && resp.StatusCode == http.StatusNotModified {
			// Replace the 304 response with the one from cache, but update with some new headers
			endToEndHeaders := getEndToEndHeaders(resp.Header)
			for _, header := range endToEndHeaders {
				cachedResp.Header[header] = resp.Header[header]
			}
			cachedResp.Status = fmt.Sprintf("%d %s", http.StatusOK, http.StatusText(http.StatusOK))
			cachedResp.StatusCode = http.StatusOK

			resp = cachedResp
		} else if (err != nil || (cachedResp != nil && resp.StatusCode >= 500)) &&
			req.Method == "GET" && canStaleOnError(cachedResp.Header, req.Header) {
			// In case of transport failure and stale-if-error activated, returns cached content
			// when available
			cachedResp.Status = fmt.Sprintf("%d %s", http.StatusOK, http.StatusText(http.StatusOK))
			cachedResp.StatusCode = http.StatusOK
			return cachedResp, nil
		} else {
			if err != nil || resp.StatusCode != http.StatusOK {
				t.Cache.Delete(cacheKey)
			}
			if err != nil {
				return nil, err
			}
		}
	} else {
		reqCacheControl := parseCacheControl(req.Header)
		if _, ok := reqCacheControl["only-if-cached"]; ok {
			resp = newGatewayTimeoutResponse(req)
		} else {
			resp, err = transport.RoundTrip(req)
			if err != nil {
				return nil, err
			}
		}
	}

	if cacheable && canStore(parseCacheControl(req.Header), parseCacheControl(resp.Header)) {
		for _, varyKey := range headerAllCommaSepValues(resp.Header, "vary") {
			varyKey = http.CanonicalHeaderKey(varyKey)
			fakeHeader := "X-Varied-" + varyKey
			reqValue := req.Header.Get(varyKey)
			if reqValue != "" {
				resp.Header.Set(fakeHeader, reqValue)
			}
		}
		switch req.Method {
		case "GET":
			// Delay caching until EOF is reached.
			resp.Body = &cachingReadCloser{
				R: resp.Body,
				OnEOF: func(r io.Reader) {
					resp := *resp
					resp.Body = ioutil.NopCloser(r)
					respBytes, err := httputil.DumpResponse(&resp, true)
					if err == nil {
						t.Cache.Set(cacheKey, respBytes)
					}
				},
			}
		default:
			respBytes, err := httputil.DumpResponse(resp, true)
			if err == nil {
				t.Cache.Set(cacheKey, respBytes)
			}
		}
	} else {
		t.Cache.Delete(cacheKey)
	}
	return resp, nil
}

// ErrNoDateHeader indicates that the HTTP headers contained no Date header.
var ErrNoDateHeader = errors.New("no Date header")

// Date parses and returns the value of the Date header.
func Date(respHeaders http.Header) (date time.Time, err error) {
	dateHeader := respHeaders.Get("date")
	if dateHeader == "" {
		err = ErrNoDateHeader
		return
	}

	return time.Parse(time.RFC1123, dateHeader)
}

type realClock struct{}

func (c *realClock) since(d time.Time) time.Duration {
	return time.Since(d)
}

type timer interface {
	since(d time.Time) time.Duration
}

var clock timer = &realClock{}

// getFreshness will return one of fresh/stale/transparent based on the cache-control
// values of the request and the response
//
// fresh indicates the response can be returned
// stale indicates that the response needs validating before it is returned
// transparent indicates the response should not be used to fulfil the request
//
// Because this is only a private cache, 'public' and 'private' in cache-control aren't
// significant. Similarly, smax-age isn't used.
func getFreshness(respHeaders, reqHeaders http.Header) (freshness int) {
	respCacheControl := parseCacheControl(respHeaders)
	reqCacheControl := parseCacheControl(reqHeaders)
	if _, ok := reqCacheControl["no-cache"]; ok {
		return transparent
	}
	if _, ok := respCacheControl["no-cache"]; ok {
		return stale
	}
	if _, ok := reqCacheControl["only-if-cached"]; ok {
		return fresh
	}

	date, err := Date(respHeaders)
	if err != nil {
		return stale
	}
	currentAge := clock.since(date)

	var lifetime time.Duration
	var zeroDuration time.Duration

	// If a response includes both an Expires header and a max-age directive,
	// the max-age directive overrides the Expires header, even if the Expires header is more restrictive.
	if maxAge, ok := respCacheControl["max-age"]; ok {
		lifetime, err = time.ParseDuration(maxAge + "s")
		if err != nil {
			lifetime = zeroDuration
		}
	} else {
		expiresHeader := respHeaders.Get("Expires")
		if expiresHeader != "" {
			expires, err := time.Parse(time.RFC1123, expiresHeader)
			if err != nil {
				lifetime = zeroDuration
			} else {
				lifetime = expires.Sub(date)
			}
		}
	}

	if maxAge, ok := reqCacheControl["max-age"]; ok {
		// the client is willing to accept a response whose age is no greater than the specified time in seconds
		lifetime, err = time.ParseDuration(maxAge + "s")
		if err != nil {
			lifetime = zeroDuration
		}
	}
	if minfresh, ok := reqCacheControl["min-fresh"]; ok {
		// the client wants a response that will still be fresh for at least the specified number of seconds.
		minfreshDuration, err := time.ParseDuration(minfresh + "s")
		if err == nil {
			currentAge = time.Duration(currentAge + minfreshDuration)
		}
	}

	if maxstale, ok := reqCacheControl["max-stale"]; ok {
		// Indicates that the client is willing to accept a response that has exceeded its expiration time.
		// If max-stale is assigned a value, then the client is willing to accept a response that has exceeded
		// its expiration time by no more than the specified number of seconds.
		// If no value is assigned to max-stale, then the client is willing to accept a stale response of any age.
		//
		// Responses served only because of a max-stale value are supposed to have a Warning header added to them,
		// but that seems like a hassle, and is it actually useful? If so, then there needs to be a different
		// return-value available here.
		if maxstale == "" {
			return fresh
		}
		maxstaleDuration, err := time.ParseDuration(maxstale + "s")
		if err == nil {
			currentAge = time.Duration(currentAge - maxstaleDuration)
		}
	}

	if lifetime > currentAge {
		return fresh
	}

	return stale
}

// Returns true if either the request or the response includes the stale-if-error
// cache control extension: https://tools.ietf.org/html/rfc5861
func canStaleOnError(respHeaders, reqHeaders http.Header) bool {
	respCacheControl := parseCacheControl(respHeaders)
	reqCacheControl := parseCacheControl(reqHeaders)

	var err error
	lifetime := time.Duration(-1)

	if staleMaxAge, ok := respCacheControl["stale-if-error"]; ok {
		if staleMaxAge != "" {
			lifetime, err = time.ParseDuration(staleMaxAge + "s")
			if err != nil {
				return false
			}
		} else {
			return true
		}
	}
	if staleMaxAge, ok := reqCacheControl["stale-if-error"]; ok {
		if staleMaxAge != "" {
			lifetime, err = time.ParseDuration(staleMaxAge + "s")
			if err != nil {
				return false
			}
		} else {
			return true
		}
	}

	if lifetime >= 0 {
		date, err := Date(respHeaders)
		if err != nil {
			return false
		}
		currentAge := clock.since(date)
		if lifetime > currentAge {
			return true
		}
	}

	return false
}

func getEndToEndHeaders(respHeaders http.Header) []string {
	// These headers are always hop-by-hop
	hopByHopHeaders := map[string]struct{}{
		"Connection":          struct{}{},
		"Keep-Alive":          struct{}{},
		"Proxy-Authenticate":  struct{}{},
		"Proxy-Authorization": struct{}{},
		"Te":                  struct{}{},
		"Trailers":            struct{}{},
		"Transfer-Encoding":   struct{}{},
		"Upgrade":             struct{}{},
	}

	for _, extra := range strings.Split(respHeaders.Get("connection"), ",") {
		// any header listed in connection, if present, is also considered hop-by-hop
		if strings.Trim(extra, " ") != "" {
			hopByHopHeaders[http.CanonicalHeaderKey(extra)] = struct{}{}
		}
	}
	endToEndHeaders := []string{}
	for respHeader := range respHeaders {
		if _, ok := hopByHopHeaders[respHeader]; !ok {
			endToEndHeaders = append(endToEndHeaders, respHeader)
		}
	}
	return endToEndHeaders
}

func canStore(reqCacheControl, respCacheControl cacheControl) (canStore bool) {
	if _, ok := respCacheControl["no-store"]; ok {
		return false
	}
	if _, ok := reqCacheControl["no-store"]; ok {
		return false
	}
	return true
}

func newGatewayTimeoutResponse(req *http.Request) *http.Response {
	var braw bytes.Buffer
	braw.WriteString("HTTP/1.1 504 Gateway Timeout\r\n\r\n")
	resp, err := http.ReadResponse(bufio.NewReader(&braw), req)
	if err != nil {
		panic(err)
	}
	return resp
}

// cloneRequest returns a clone of the provided *http.Request.
// The clone is a shallow copy of the struct and its Header map.
// (This function copyright goauth2 authors: https://code.google.com/p/goauth2)
func cloneRequest(r *http.Request) *http.Request {
	// shallow copy of the struct
	r2 := new(http.Request)
	*r2 = *r
	// deep copy of the Header
	r2.Header = make(http.Header)
	for k, s := range r.Header {
		r2.Header[k] = s
	}
	return r2
}

type cacheControl map[string]string

func parseCacheControl(headers http.Header) cacheControl {
	cc := cacheControl{}
	ccHeader := headers.Get("Cache-Control")
	for _, part := range strings.Split(ccHeader, ",") {
		part = strings.Trim(part, " ")
		if part == "" {
			continue
		}
		if strings.ContainsRune(part, '=') {
			keyval := strings.Split(part, "=")
			cc[strings.Trim(keyval[0], " ")] = strings.Trim(keyval[1], ",")
		} else {
			cc[part] = ""
		}
	}
	return cc
}

// headerAllCommaSepValues returns all comma-separated values (each
// with whitespace trimmed) for header name in headers. According to
// Section 4.2 of the HTTP/1.1 spec
// (http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2),
// values from multiple occurrences of a header should be concatenated, if
// the header's value is a comma-separated list.
func headerAllCommaSepValues(headers http.Header, name string) []string {
	var vals []string
	for _, val := range headers[http.CanonicalHeaderKey(name)] {
		fields := strings.Split(val, ",")
		for i, f := range fields {
			fields[i] = strings.TrimSpace(f)
		}
		vals = append(vals, fields...)
	}
	return vals
}

// cachingReadCloser is a wrapper around ReadCloser R that calls OnEOF
// handler with a full copy of the content read from R when EOF is
// reached.
type cachingReadCloser struct {
	// Underlying ReadCloser.
	R io.ReadCloser
	// OnEOF is called with a copy of the content of R when EOF is reached.
	OnEOF func(io.Reader)

	buf bytes.Buffer // buf stores a copy of the content of R.
}

// Read reads the next len(p) bytes from R or until R is drained. The
// return value n is the number of bytes read. If R has no data to
// return, err is io.EOF and OnEOF is called with a full copy of what
// has been read so far.
func (r *cachingReadCloser) Read(p []byte) (n int, err error) {
	n, err = r.R.Read(p)
	r.buf.Write(p[:n])
	if err == io.EOF {
		r.OnEOF(bytes.NewReader(r.buf.Bytes()))
	}
	return n, err
}

func (r *cachingReadCloser) Close() error {
	return r.R.Close()
}

// NewMemoryCacheTransport returns a new Transport using the in-memory cache implementation
func NewMemoryCacheTransport() *Transport {
	c := NewMemoryCache()
	t := NewTransport(c)
	return t
}
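The Cache interface above is the extension point for custom backends. A minimal sketch of a custom implementation (the cacheutil package and prefixCache type are hypothetical, not part of httpcache):

```go
package cacheutil // hypothetical package name

import "github.com/gregjones/httpcache"

// prefixCache is a hypothetical wrapper that namespaces keys before
// delegating to another httpcache.Cache implementation.
type prefixCache struct {
	prefix string
	inner  httpcache.Cache
}

// Compile-time check that prefixCache satisfies httpcache.Cache.
var _ httpcache.Cache = (*prefixCache)(nil)

func (p *prefixCache) Get(key string) ([]byte, bool) { return p.inner.Get(p.prefix + key) }
func (p *prefixCache) Set(key string, resp []byte)   { p.inner.Set(p.prefix+key, resp) }
func (p *prefixCache) Delete(key string)             { p.inner.Delete(p.prefix + key) }
```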
191
vendor/github.com/juju/ratelimit/LICENSE
generated
vendored
@ -1,191 +0,0 @@
All files in this repository are licensed as follows. If you contribute
to this repository, it is assumed that you license your contribution
under the same license unless you state otherwise.

All files Copyright (C) 2015 Canonical Ltd. unless otherwise specified in the file.

This software is licensed under the LGPLv3, included below.

As a special exception to the GNU Lesser General Public License version 3
("LGPL3"), the copyright holders of this Library give you permission to
convey to a third party a Combined Work that links statically or dynamically
to this Library without providing any Minimal Corresponding Source or
Minimal Application Code as set out in 4d or providing the installation
information set out in section 4e, provided that you comply with the other
provisions of LGPL3 and provided that you meet, for the Application the
terms and conditions of the license(s) which apply to the Application.

Except as stated in this special exception, the provisions of LGPL3 will
continue to comply in full to this Library. If you modify this Library, you
may apply this exception to your version of this Library, but you are not
obliged to do so. If you do not wish to do so, delete this exception
statement from your version. This exception does not (and cannot) modify any
license terms which apply to the Application, with which you must still
comply.


GNU LESSER GENERAL PUBLIC LICENSE
Version 3, 29 June 2007

Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.


This version of the GNU Lesser General Public License incorporates
the terms and conditions of version 3 of the GNU General Public
License, supplemented by the additional permissions listed below.

0. Additional Definitions.

As used herein, "this License" refers to version 3 of the GNU Lesser
General Public License, and the "GNU GPL" refers to version 3 of the GNU
General Public License.

"The Library" refers to a covered work governed by this License,
other than an Application or a Combined Work as defined below.

An "Application" is any work that makes use of an interface provided
by the Library, but which is not otherwise based on the Library.
Defining a subclass of a class defined by the Library is deemed a mode
of using an interface provided by the Library.

A "Combined Work" is a work produced by combining or linking an
Application with the Library. The particular version of the Library
with which the Combined Work was made is also called the "Linked
Version".

The "Minimal Corresponding Source" for a Combined Work means the
Corresponding Source for the Combined Work, excluding any source code
for portions of the Combined Work that, considered in isolation, are
based on the Application, and not on the Linked Version.

The "Corresponding Application Code" for a Combined Work means the
object code and/or source code for the Application, including any data
and utility programs needed for reproducing the Combined Work from the
Application, but excluding the System Libraries of the Combined Work.

1. Exception to Section 3 of the GNU GPL.

You may convey a covered work under sections 3 and 4 of this License
without being bound by section 3 of the GNU GPL.

2. Conveying Modified Versions.

If you modify a copy of the Library, and, in your modifications, a
facility refers to a function or data to be supplied by an Application
that uses the facility (other than as an argument passed when the
facility is invoked), then you may convey a copy of the modified
version:

a) under this License, provided that you make a good faith effort to
ensure that, in the event an Application does not supply the
function or data, the facility still operates, and performs
whatever part of its purpose remains meaningful, or

b) under the GNU GPL, with none of the additional permissions of
this License applicable to that copy.

3. Object Code Incorporating Material from Library Header Files.

The object code form of an Application may incorporate material from
a header file that is part of the Library. You may convey such object
code under terms of your choice, provided that, if the incorporated
material is not limited to numerical parameters, data structure
layouts and accessors, or small macros, inline functions and templates
(ten or fewer lines in length), you do both of the following:

a) Give prominent notice with each copy of the object code that the
Library is used in it and that the Library and its use are
covered by this License.

b) Accompany the object code with a copy of the GNU GPL and this license
document.

4. Combined Works.

You may convey a Combined Work under terms of your choice that,
taken together, effectively do not restrict modification of the
portions of the Library contained in the Combined Work and reverse
engineering for debugging such modifications, if you also do each of
the following:

a) Give prominent notice with each copy of the Combined Work that
the Library is used in it and that the Library and its use are
covered by this License.

b) Accompany the Combined Work with a copy of the GNU GPL and this license
document.

c) For a Combined Work that displays copyright notices during
execution, include the copyright notice for the Library among
these notices, as well as a reference directing the user to the
copies of the GNU GPL and this license document.

d) Do one of the following:

0) Convey the Minimal Corresponding Source under the terms of this
License, and the Corresponding Application Code in a form
suitable for, and under terms that permit, the user to
recombine or relink the Application with a modified version of
the Linked Version to produce a modified Combined Work, in the
manner specified by section 6 of the GNU GPL for conveying
Corresponding Source.

1) Use a suitable shared library mechanism for linking with the
Library. A suitable mechanism is one that (a) uses at run time
a copy of the Library already present on the user's computer
system, and (b) will operate properly with a modified version
of the Library that is interface-compatible with the Linked
Version.

e) Provide Installation Information, but only if you would otherwise
be required to provide such information under section 6 of the
GNU GPL, and only to the extent that such information is
necessary to install and execute a modified version of the
Combined Work produced by recombining or relinking the
Application with a modified version of the Linked Version. (If
you use option 4d0, the Installation Information must accompany
the Minimal Corresponding Source and Corresponding Application
Code. If you use option 4d1, you must provide the Installation
Information in the manner specified by section 6 of the GNU GPL
for conveying Corresponding Source.)

5. Combined Libraries.

You may place library facilities that are a work based on the
Library side by side in a single library together with other library
facilities that are not Applications and are not covered by this
License, and convey such a combined library under terms of your
choice, if you do both of the following:

a) Accompany the combined library with a copy of the same work based
on the Library, uncombined with any other library facilities,
conveyed under the terms of this License.

b) Give prominent notice with the combined library that part of it
is a work based on the Library, and explaining where to find the
accompanying uncombined form of the same work.

6. Revised Versions of the GNU Lesser General Public License.

The Free Software Foundation may publish revised and/or new versions
of the GNU Lesser General Public License from time to time. Such new
versions will be similar in spirit to the present version, but may
differ in detail to address new problems or concerns.

Each version is given a distinguishing version number. If the
Library as you received it specifies that a certain numbered version
of the GNU Lesser General Public License "or any later version"
applies to it, you have the option of following the terms and
conditions either of that published version or of any later version
published by the Free Software Foundation. If the Library as you
received it does not specify a version number of the GNU Lesser
General Public License, you may choose any version of the GNU Lesser
General Public License ever published by the Free Software Foundation.

If the Library as you received it specifies that a proxy can decide
whether future versions of the GNU Lesser General Public License shall
apply, that proxy's public statement of acceptance of any version is
permanent authorization for you to choose that version for the
Library.
117
vendor/github.com/juju/ratelimit/README.md
generated
vendored
@ -1,117 +0,0 @@
# ratelimit
--
    import "github.com/juju/ratelimit"

The ratelimit package provides an efficient token bucket implementation. See
http://en.wikipedia.org/wiki/Token_bucket.

## Usage

#### func Reader

```go
func Reader(r io.Reader, bucket *Bucket) io.Reader
```
Reader returns a reader that is rate limited by the given token bucket. Each
token in the bucket represents one byte.

#### func Writer

```go
func Writer(w io.Writer, bucket *Bucket) io.Writer
```
Writer returns a writer that is rate limited by the given token bucket. Each
token in the bucket represents one byte.

#### type Bucket

```go
type Bucket struct {
}
```

Bucket represents a token bucket that fills at a predetermined rate. Methods on
Bucket may be called concurrently.

#### func NewBucket

```go
func NewBucket(fillInterval time.Duration, capacity int64) *Bucket
```
NewBucket returns a new token bucket that fills at the rate of one token every
fillInterval, up to the given maximum capacity. Both arguments must be positive.
The bucket is initially full.

#### func NewBucketWithQuantum

```go
func NewBucketWithQuantum(fillInterval time.Duration, capacity, quantum int64) *Bucket
```
NewBucketWithQuantum is similar to NewBucket, but allows the specification of
the quantum size - quantum tokens are added every fillInterval.

#### func NewBucketWithRate

```go
func NewBucketWithRate(rate float64, capacity int64) *Bucket
```
NewBucketWithRate returns a token bucket that fills the bucket at the rate of
rate tokens per second up to the given maximum capacity. Because of limited
clock resolution, at high rates, the actual rate may be up to 1% different from
the specified rate.

#### func (*Bucket) Rate

```go
func (tb *Bucket) Rate() float64
```
Rate returns the fill rate of the bucket, in tokens per second.

#### func (*Bucket) Take

```go
func (tb *Bucket) Take(count int64) time.Duration
```
Take takes count tokens from the bucket without blocking. It returns the time
that the caller should wait until the tokens are actually available.

Note that the request is irrevocable - there is no way to return tokens to
the bucket once this method commits us to taking them.

#### func (*Bucket) TakeAvailable

```go
func (tb *Bucket) TakeAvailable(count int64) int64
```
TakeAvailable takes up to count immediately available tokens from the bucket. It
returns the number of tokens removed, or zero if there are no available tokens.
It does not block.

#### func (*Bucket) TakeMaxDuration

```go
func (tb *Bucket) TakeMaxDuration(count int64, maxWait time.Duration) (time.Duration, bool)
```
TakeMaxDuration is like Take, except that it will only take tokens from the
bucket if the wait time for the tokens is no greater than maxWait.

If it would take longer than maxWait for the tokens to become available, it does
nothing and reports false, otherwise it returns the time that the caller should
wait until the tokens are actually available, and reports true.

#### func (*Bucket) Wait

```go
func (tb *Bucket) Wait(count int64)
```
Wait takes count tokens from the bucket, waiting until they are available.

#### func (*Bucket) WaitMaxDuration

```go
func (tb *Bucket) WaitMaxDuration(count int64, maxWait time.Duration) bool
```
WaitMaxDuration is like Wait except that it will only take tokens from the
bucket if it needs to wait for no greater than maxWait. It reports whether any
tokens have been removed from the bucket. If no tokens have been removed, it
returns immediately.
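A short usage sketch of the API documented above (the rate and capacity values are illustrative):

```go
package main

import (
	"fmt"
	"time"

	"github.com/juju/ratelimit"
)

func main() {
	// A bucket that refills at 100 tokens per second, holding at most 100.
	bucket := ratelimit.NewBucketWithRate(100, 100)

	// Wait blocks until a token is available, throttling the loop to
	// roughly the bucket's fill rate once the initial burst is spent.
	for i := 0; i < 5; i++ {
		bucket.Wait(1)
		fmt.Println("tick", i, time.Now())
	}
}
```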
284
vendor/github.com/juju/ratelimit/ratelimit.go
generated
vendored
@ -1,284 +0,0 @@
// Copyright 2014 Canonical Ltd.
// Licensed under the LGPLv3 with static-linking exception.
// See LICENCE file for details.

// Package ratelimit provides an efficient token bucket implementation
// that can be used to limit the rate of arbitrary things.
// See http://en.wikipedia.org/wiki/Token_bucket.
package ratelimit

import (
	"math"
	"strconv"
	"sync"
	"time"
)

// Bucket represents a token bucket that fills at a predetermined rate.
// Methods on Bucket may be called concurrently.
type Bucket struct {
	startTime    time.Time
	capacity     int64
	quantum      int64
	fillInterval time.Duration
	clock        Clock

	// The mutex guards the fields following it.
	mu sync.Mutex

	// avail holds the number of available tokens
	// in the bucket, as of availTick ticks from startTime.
	// It will be negative when there are consumers
	// waiting for tokens.
	avail     int64
	availTick int64
}

// Clock is used to inject testable fakes.
type Clock interface {
	Now() time.Time
	Sleep(d time.Duration)
}

// realClock implements Clock in terms of standard time functions.
type realClock struct{}

// Now is identical to time.Now.
func (realClock) Now() time.Time {
	return time.Now()
}

// Sleep is identical to time.Sleep.
func (realClock) Sleep(d time.Duration) {
	time.Sleep(d)
}

// NewBucket returns a new token bucket that fills at the
// rate of one token every fillInterval, up to the given
// maximum capacity. Both arguments must be
// positive. The bucket is initially full.
func NewBucket(fillInterval time.Duration, capacity int64) *Bucket {
	return NewBucketWithClock(fillInterval, capacity, realClock{})
}

// NewBucketWithClock is identical to NewBucket but injects a testable clock
// interface.
func NewBucketWithClock(fillInterval time.Duration, capacity int64, clock Clock) *Bucket {
	return NewBucketWithQuantumAndClock(fillInterval, capacity, 1, clock)
}

// rateMargin specifies the allowed variance of actual
// rate from specified rate. 1% seems reasonable.
const rateMargin = 0.01

// NewBucketWithRate returns a token bucket that fills the bucket
// at the rate of rate tokens per second up to the given
// maximum capacity. Because of limited clock resolution,
// at high rates, the actual rate may be up to 1% different from the
// specified rate.
func NewBucketWithRate(rate float64, capacity int64) *Bucket {
	return NewBucketWithRateAndClock(rate, capacity, realClock{})
}

// NewBucketWithRateAndClock is identical to NewBucketWithRate but injects a
// testable clock interface.
func NewBucketWithRateAndClock(rate float64, capacity int64, clock Clock) *Bucket {
	for quantum := int64(1); quantum < 1<<50; quantum = nextQuantum(quantum) {
		fillInterval := time.Duration(1e9 * float64(quantum) / rate)
		if fillInterval <= 0 {
			continue
		}
		tb := NewBucketWithQuantumAndClock(fillInterval, capacity, quantum, clock)
		if diff := math.Abs(tb.Rate() - rate); diff/rate <= rateMargin {
			return tb
		}
	}
	panic("cannot find suitable quantum for " + strconv.FormatFloat(rate, 'g', -1, 64))
}

// nextQuantum returns the next quantum to try after q.
// We grow the quantum exponentially, but slowly, so we
// get a good fit in the lower numbers.
func nextQuantum(q int64) int64 {
	q1 := q * 11 / 10
	if q1 == q {
		q1++
	}
	return q1
}

// NewBucketWithQuantum is similar to NewBucket, but allows
// the specification of the quantum size - quantum tokens
// are added every fillInterval.
func NewBucketWithQuantum(fillInterval time.Duration, capacity, quantum int64) *Bucket {
	return NewBucketWithQuantumAndClock(fillInterval, capacity, quantum, realClock{})
}

// NewBucketWithQuantumAndClock is identical to NewBucketWithQuantum but injects
// a testable clock interface.
func NewBucketWithQuantumAndClock(fillInterval time.Duration, capacity, quantum int64, clock Clock) *Bucket {
	if fillInterval <= 0 {
		panic("token bucket fill interval is not > 0")
	}
	if capacity <= 0 {
		panic("token bucket capacity is not > 0")
	}
	if quantum <= 0 {
		panic("token bucket quantum is not > 0")
	}
	return &Bucket{
		clock:        clock,
		startTime:    clock.Now(),
		capacity:     capacity,
		quantum:      quantum,
		avail:        capacity,
		fillInterval: fillInterval,
	}
}

// Wait takes count tokens from the bucket, waiting until they are
// available.
func (tb *Bucket) Wait(count int64) {
	if d := tb.Take(count); d > 0 {
		tb.clock.Sleep(d)
	}
}

// WaitMaxDuration is like Wait except that it will
// only take tokens from the bucket if it needs to wait
// for no greater than maxWait. It reports whether
// any tokens have been removed from the bucket.
// If no tokens have been removed, it returns immediately.
func (tb *Bucket) WaitMaxDuration(count int64, maxWait time.Duration) bool {
	d, ok := tb.TakeMaxDuration(count, maxWait)
	if d > 0 {
		tb.clock.Sleep(d)
	}
	return ok
}

const infinityDuration time.Duration = 0x7fffffffffffffff

// Take takes count tokens from the bucket without blocking. It returns
// the time that the caller should wait until the tokens are actually
// available.
//
// Note that the request is irrevocable - there is no way to return
// tokens to the bucket once this method commits us to taking them.
func (tb *Bucket) Take(count int64) time.Duration {
	d, _ := tb.take(tb.clock.Now(), count, infinityDuration)
	return d
}

// TakeMaxDuration is like Take, except that
// it will only take tokens from the bucket if the wait
// time for the tokens is no greater than maxWait.
//
// If it would take longer than maxWait for the tokens
// to become available, it does nothing and reports false,
// otherwise it returns the time that the caller should
// wait until the tokens are actually available, and reports
// true.
func (tb *Bucket) TakeMaxDuration(count int64, maxWait time.Duration) (time.Duration, bool) {
	return tb.take(tb.clock.Now(), count, maxWait)
}

// TakeAvailable takes up to count immediately available tokens from the
// bucket. It returns the number of tokens removed, or zero if there are
// no available tokens. It does not block.
func (tb *Bucket) TakeAvailable(count int64) int64 {
	return tb.takeAvailable(tb.clock.Now(), count)
}

// takeAvailable is the internal version of TakeAvailable - it takes the
// current time as an argument to enable easy testing.
func (tb *Bucket) takeAvailable(now time.Time, count int64) int64 {
	if count <= 0 {
		return 0
	}
	tb.mu.Lock()
	defer tb.mu.Unlock()

	tb.adjust(now)
	if tb.avail <= 0 {
		return 0
	}
	if count > tb.avail {
		count = tb.avail
	}
	tb.avail -= count
	return count
}

// Available returns the number of available tokens. It will be negative
// when there are consumers waiting for tokens. Note that if this
// returns greater than zero, it does not guarantee that calls that take
// tokens from the buffer will succeed, as the number of available
// tokens could have changed in the meantime. This method is intended
// primarily for metrics reporting and debugging.
func (tb *Bucket) Available() int64 {
	return tb.available(tb.clock.Now())
}

// available is the internal version of Available - it takes the current time as
// an argument to enable easy testing.
func (tb *Bucket) available(now time.Time) int64 {
	tb.mu.Lock()
	defer tb.mu.Unlock()
	tb.adjust(now)
	return tb.avail
}

// Capacity returns the capacity that the bucket was created with.
func (tb *Bucket) Capacity() int64 {
	return tb.capacity
}

// Rate returns the fill rate of the bucket, in tokens per second.
func (tb *Bucket) Rate() float64 {
	return 1e9 * float64(tb.quantum) / float64(tb.fillInterval)
}

// take is the internal version of Take - it takes the current time as
// an argument to enable easy testing.
func (tb *Bucket) take(now time.Time, count int64, maxWait time.Duration) (time.Duration, bool) {
	if count <= 0 {
		return 0, true
	}
	tb.mu.Lock()
	defer tb.mu.Unlock()

	currentTick := tb.adjust(now)
	avail := tb.avail - count
	if avail >= 0 {
		tb.avail = avail
		return 0, true
	}
	// Round up the missing tokens to the nearest multiple
	// of quantum - the tokens won't be available until
	// that tick.
	endTick := currentTick + (-avail+tb.quantum-1)/tb.quantum
	endTime := tb.startTime.Add(time.Duration(endTick) * tb.fillInterval)
	waitTime := endTime.Sub(now)
	if waitTime > maxWait {
		return 0, false
	}
	tb.avail = avail
	return waitTime, true
}

// adjust adjusts the current bucket capacity based on the current time.
// It returns the current tick.
func (tb *Bucket) adjust(now time.Time) (currentTick int64) {
	currentTick = int64(now.Sub(tb.startTime) / tb.fillInterval)

	if tb.avail >= tb.capacity {
		return
	}
	tb.avail += (currentTick - tb.availTick) * tb.quantum
	if tb.avail > tb.capacity {
		tb.avail = tb.capacity
	}
	tb.availTick = currentTick
	return
}
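The exported Clock interface above exists so the bucket can be driven by a fake clock in tests. A minimal sketch (the fakeClock type and test are hypothetical, not part of the package):

```go
package ratelimit_test

import (
	"testing"
	"time"

	"github.com/juju/ratelimit"
)

// fakeClock is a hypothetical manual clock; Sleep advances it instantly.
type fakeClock struct{ now time.Time }

func (c *fakeClock) Now() time.Time        { return c.now }
func (c *fakeClock) Sleep(d time.Duration) { c.now = c.now.Add(d) }

func TestBucketDrainsAndRefills(t *testing.T) {
	clk := &fakeClock{now: time.Now()}
	// One token per 100ms, capacity 10; the bucket starts full.
	tb := ratelimit.NewBucketWithClock(100*time.Millisecond, 10, clk)

	if got := tb.TakeAvailable(10); got != 10 {
		t.Fatalf("got %d tokens, want 10", got)
	}
	clk.now = clk.now.Add(time.Second) // 1s at 10 tokens/s refills it
	if got := tb.TakeAvailable(10); got != 10 {
		t.Fatalf("got %d tokens after refill, want 10", got)
	}
}
```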
51
vendor/github.com/juju/ratelimit/reader.go
generated
vendored
@ -1,51 +0,0 @@
// Copyright 2014 Canonical Ltd.
// Licensed under the LGPLv3 with static-linking exception.
// See LICENCE file for details.

package ratelimit

import "io"

type reader struct {
	r      io.Reader
	bucket *Bucket
}

// Reader returns a reader that is rate limited by
// the given token bucket. Each token in the bucket
// represents one byte.
func Reader(r io.Reader, bucket *Bucket) io.Reader {
	return &reader{
		r:      r,
		bucket: bucket,
	}
}

func (r *reader) Read(buf []byte) (int, error) {
	n, err := r.r.Read(buf)
	if n <= 0 {
		return n, err
	}
	r.bucket.Wait(int64(n))
	return n, err
}

type writer struct {
	w      io.Writer
	bucket *Bucket
}

// Writer returns a writer that is rate limited by
// the given token bucket. Each token in the bucket
// represents one byte.
func Writer(w io.Writer, bucket *Bucket) io.Writer {
	return &writer{
		w:      w,
		bucket: bucket,
	}
}

func (w *writer) Write(buf []byte) (int, error) {
	w.bucket.Wait(int64(len(buf)))
	return w.w.Write(buf)
}
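A minimal sketch of throttling an io.Copy with these wrappers (the 1 KB/s rate and the input data are illustrative):

```go
package main

import (
	"io"
	"os"
	"strings"

	"github.com/juju/ratelimit"
)

func main() {
	// One token per byte: cap the copy at roughly 1024 bytes per second.
	bucket := ratelimit.NewBucketWithRate(1024, 1024)

	src := strings.NewReader(strings.Repeat("x", 4096))
	io.Copy(os.Stdout, ratelimit.Reader(src, bucket))
}
```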
7
vendor/github.com/mailru/easyjson/LICENSE
generated
vendored
@ -1,7 +0,0 @@
Copyright (c) 2016 Mail.Ru Group

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
193
vendor/github.com/mailru/easyjson/README.md
generated
vendored
193
vendor/github.com/mailru/easyjson/README.md
generated
vendored
@ -1,193 +0,0 @@
# easyjson [![Build Status](https://travis-ci.org/mailru/easyjson.svg?branch=master)](https://travis-ci.org/mailru/easyjson)

easyjson allows (un-)marshalling JSON to and from Go structs without using reflection, by generating marshaller code.

One of the aims of the library is to keep the generated code simple enough that it can be easily optimized or fixed. Another goal is to give users the ability to customize the generated code in ways not available in 'encoding/json', such as generating snake_case names or enabling 'omitempty' behavior by default.

## usage
```
go get github.com/mailru/easyjson/...
easyjson -all <file>.go
```

This will generate `<file>_easyjson.go` with marshaller/unmarshaller methods for structs. The `GOPATH` variable needs to be set up correctly, since the generation invokes `go run` on a temporary file (a really convenient approach to code generation borrowed from https://github.com/pquerna/ffjson).

## options
```
Usage of .root/bin/easyjson:
  -all
        generate un-/marshallers for all structs in a file
  -build_tags string
        build tags to add to generated file
  -leave_temps
        do not delete temporary files
  -no_std_marshalers
        don't generate MarshalJSON/UnmarshalJSON methods
  -noformat
        do not run 'gofmt -w' on output file
  -omit_empty
        omit empty fields by default
  -snake_case
        use snake_case names instead of CamelCase by default
  -stubs
        only generate stubs for marshallers/unmarshallers methods
```

Using `-all` will generate (un-)marshallers for all structs in the file. By default, structs need to have a line beginning with `easyjson:json` in their docstring, e.g.:
```
//easyjson:json
type A struct{}
```

`-snake_case` tells easyjson to generate snake\_case field names by default (unless explicitly overridden by a field tag). The CamelCase to snake\_case conversion algorithm should work in most cases (e.g. HTTPVersion will be converted to http_version). There can be names like JSONHTTPRPC where the conversion will return an unexpected result (jsonhttprpc without underscores), but such names require a dictionary to do the conversion and may be ambiguous.

`-build_tags` will add the corresponding build tag line to the generated file.

## marshaller/unmarshaller interfaces

easyjson generates MarshalJSON/UnmarshalJSON methods that are compatible with the interfaces from 'encoding/json'. They are usable with 'json.Marshal' and 'json.Unmarshal'; however, actually using those will result in significantly worse performance compared to the custom interfaces.

`MarshalEasyJSON` / `UnmarshalEasyJSON` methods are generated for faster parsing using custom Lexer/Writer structs (`jlexer.Lexer` and `jwriter.Writer`). The method signature is defined in the `easyjson.Marshaler` / `easyjson.Unmarshaler` interfaces. These interfaces make it possible to avoid unnecessary reflection or type assertions during parsing. The functions can be used manually or with the `easyjson.Marshal<...>` and `easyjson.Unmarshal<...>` helper methods.

The `jwriter.Writer` struct, in addition to a function returning the data as a single slice, also has methods to return the size and to send the data to an `io.Writer`. This is aimed at a typical HTTP use-case, when you want to know the `Content-Length` before actually starting to send the data.

There are helpers in the top-level package for marshaling/unmarshaling data using the custom interfaces to and from writers, including a helper for `http.ResponseWriter`.

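To make the helpers above concrete, here is a minimal sketch; the `Person` type is hypothetical, and the code compiles only after `easyjson -all` has generated its `MarshalEasyJSON`/`UnmarshalEasyJSON` methods:

```go
package main

import (
	"fmt"

	"github.com/mailru/easyjson"
)

//easyjson:json
type Person struct {
	Name string `json:"name"`
	Age  int    `json:"age"`
}

func main() {
	// easyjson.Marshal dispatches to the generated MarshalEasyJSON method,
	// avoiding reflection entirely.
	data, err := easyjson.Marshal(Person{Name: "gopher", Age: 12})
	if err != nil {
		panic(err)
	}

	// easyjson.Unmarshal likewise uses the generated UnmarshalEasyJSON.
	var p Person
	if err := easyjson.Unmarshal(data, &p); err != nil {
		panic(err)
	}
	fmt.Println(p.Name, p.Age)
}
```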
## custom types
If the `easyjson.Marshaler` / `easyjson.Unmarshaler` interfaces are implemented by a type involved in JSON parsing, the type will be marshaled/unmarshaled using these methods. The `easyjson.Optional` interface allows a custom type to integrate with the 'omitempty' logic.

As an example, easyjson includes an `easyjson.RawMessage` analogous to `json.RawMessage`.

Also, there are 'optional' wrappers for primitive types in the `easyjson/opt` package. These are useful when it is necessary to distinguish between a missing value and the default value for the type. The wrappers make it possible to avoid pointers and extra heap allocations in such cases.

## memory pooling

The library uses a custom buffer which allocates data in increasing chunks (128-32768 bytes). Chunks of 512 bytes and larger are reused with the help of `sync.Pool`. The maximum size of a chunk is bounded to reduce redundancy in memory allocation and to make the chunks more reusable in the case of large buffer sizes.

The buffer code is in the `easyjson/buffer` package; the exact values can be tweaked by a `buffer.Init()` call before the first serialization.

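A minimal sketch of tweaking the pool before first use; the sizes are illustrative, and the `PoolConfig` fields are the ones defined in `buffer/pool.go` later in this diff:

```go
package main

import (
	"fmt"

	"github.com/mailru/easyjson/buffer"
	"github.com/mailru/easyjson/jwriter"
)

func main() {
	// Must run before the first serialization; these values are illustrative.
	buffer.Init(buffer.PoolConfig{
		StartSize:  256,   // first chunk allocated per buffer
		PooledSize: 1024,  // smallest chunk worth returning to sync.Pool
		MaxSize:    65536, // largest chunk ever allocated
	})

	var w jwriter.Writer
	w.String("hello")
	data, _ := w.BuildBytes()
	fmt.Println(string(data))
}
```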
## limitations
* The library is at an early stage; there are likely to be some bugs, and some features of 'encoding/json' may not be supported. Please report such cases, so that they may be fixed sooner.
* Object keys are case-sensitive (unlike encoding/json). Case-insensitive behavior will be implemented as an option (case-insensitive matching is slower).
* The unsafe package is used by the code. While a non-unsafe version of easyjson can be made in the future, using the unsafe package simplifies a lot of code by allowing no-copy []byte to string conversion within the library. This is used only during parsing, and all the returned values are allocated properly.
* Floats are currently formatted with the default precision of the 'strconv' package. This is not always the correct way to handle floats, but there aren't enough use-cases for floats at hand to do anything better.
* During parsing, parts of JSON that are skipped over are not syntactically validated beyond what is required to skip matching parentheses.
* No true streaming support for encoding/decoding. For many use-cases and protocols, data length is typically known on input and needs to be known before sending the data.

## benchmarks
Most benchmarks were done using a sample 13kB JSON (9k if serialized back, trimming the whitespace) from https://dev.twitter.com/rest/reference/get/search/tweets. The sample is very close to real-world data, quite structured, and contains a variety of different types.

For small request benchmarks, an 80-byte portion of the regular sample was used.

For large request marshalling benchmarks, a struct containing 50 regular samples was used, making a ~500kB output JSON.

Benchmarks are available in the repository and are run on 'make'.

### easyjson vs. encoding/json

easyjson seems to be 5-6 times faster than the default json serialization for unmarshalling and 3-4 times faster for non-concurrent marshalling. Concurrent marshalling is 6-7x faster if marshalling to a writer.

### easyjson vs. ffjson

easyjson uses the same approach for code generation as ffjson, but a significantly different approach to lexing and generated code. This allows easyjson to be 2-3x faster for unmarshalling and 1.5-2x faster for non-concurrent marshalling.

ffjson seems to behave oddly if used concurrently: for large requests, pooling hurts performance instead of boosting it, and it does not scale well either. These issues are likely fixable, and until then comparisons may vary a lot from version to version.

easyjson is similar in performance for small requests and 2-5x faster for large ones if used with a writer.

### easyjson vs. go/codec

The github.com/ugorji/go/codec library provides compile-time helpers for JSON generation. In this case, the helpers are not exactly marshallers, as they are encoding-independent.

easyjson is generally ~2x faster for non-concurrent benchmarks and about 3x faster for concurrent encoding (without marshalling to a writer). The unsafe option for generated helpers was used.

As an attempt to measure the marshalling performance of 'go/codec' itself (as opposed to allocations/memcpy/writer interface invocations), a benchmark was done with resetting the length of a byte slice rather than resetting the whole slice to nil. However, the optimization in this exact form may not be applicable in practice, since the memory is not freed between marshalling operations.

### easyjson vs 'ujson' python module
ujson uses C code for parsing, so it is interesting to see how plain Go compares to that. It is important to note that the resulting object is slower to access from python, since the library parses JSON objects into dictionaries.

easyjson seems to be slightly faster for unmarshalling (finally!) and 2-3x faster for marshalling.

### benchmark figures
The data was measured on 4 February 2016 using then-current ffjson and golang 1.6. Data for go/codec was added on 4 March 2016, benchmarked on the same machine.

#### Unmarshalling
| lib      | json size | MB/s | allocs/op | B/op  |
|----------|-----------|------|-----------|-------|
| standard | regular   | 22   | 218       | 10229 |
| standard | small     | 9.7  | 14        | 720   |
| easyjson | regular   | 125  | 128       | 9794  |
| easyjson | small     | 67   | 3         | 128   |
| ffjson   | regular   | 66   | 141       | 9985  |
| ffjson   | small     | 17.6 | 10        | 488   |
| codec    | regular   | 55   | 434       | 19299 |
| codec    | small     | 29   | 7         | 336   |
| ujson    | regular   | 103  | N/A       | N/A   |

#### Marshalling, one goroutine.
| lib       | json size | MB/s | allocs/op | B/op  |
|-----------|-----------|------|-----------|-------|
| standard  | regular   | 75   | 9         | 23256 |
| standard  | small     | 32   | 3         | 328   |
| standard  | large     | 80   | 17        | 1.2M  |
| easyjson  | regular   | 213  | 9         | 10260 |
| easyjson* | regular   | 263  | 8         | 742   |
| easyjson  | small     | 125  | 1         | 128   |
| easyjson  | large     | 212  | 33        | 490k  |
| easyjson* | large     | 262  | 25        | 2879  |
| ffjson    | regular   | 122  | 153       | 21340 |
| ffjson**  | regular   | 146  | 152       | 4897  |
| ffjson    | small     | 36   | 5         | 384   |
| ffjson**  | small     | 64   | 4         | 128   |
| ffjson    | large     | 134  | 7317      | 818k  |
| ffjson**  | large     | 125  | 7320      | 827k  |
| codec     | regular   | 80   | 17        | 33601 |
| codec***  | regular   | 108  | 9         | 1153  |
| codec     | small     | 42   | 3         | 304   |
| codec***  | small     | 56   | 1         | 48    |
| codec     | large     | 73   | 483       | 2.5M  |
| codec***  | large     | 103  | 451       | 66007 |
| ujson     | regular   | 92   | N/A       | N/A   |

\* marshalling to a writer,
\*\* using `ffjson.Pool()`,
\*\*\* reusing output slice instead of resetting it to nil

#### Marshalling, concurrent.
| lib       | json size | MB/s | allocs/op | B/op  |
|-----------|-----------|------|-----------|-------|
| standard  | regular   | 252  | 9         | 23257 |
| standard  | small     | 124  | 3         | 328   |
| standard  | large     | 289  | 17        | 1.2M  |
| easyjson  | regular   | 792  | 9         | 10597 |
| easyjson* | regular   | 1748 | 8         | 779   |
| easyjson  | small     | 333  | 1         | 128   |
| easyjson  | large     | 718  | 36        | 548k  |
| easyjson* | large     | 2134 | 25        | 4957  |
| ffjson    | regular   | 301  | 153       | 21629 |
| ffjson**  | regular   | 707  | 152       | 5148  |
| ffjson    | small     | 62   | 5         | 384   |
| ffjson**  | small     | 282  | 4         | 128   |
| ffjson    | large     | 438  | 7330      | 1.0M  |
| ffjson**  | large     | 131  | 7319      | 820k  |
| codec     | regular   | 183  | 17        | 33603 |
| codec***  | regular   | 671  | 9         | 1157  |
| codec     | small     | 147  | 3         | 304   |
| codec***  | small     | 299  | 1         | 48    |
| codec     | large     | 190  | 483       | 2.5M  |
| codec***  | large     | 752  | 451       | 77574 |

\* marshalling to a writer,
\*\* using `ffjson.Pool()`,
\*\*\* reusing output slice instead of resetting it to nil
207 vendor/github.com/mailru/easyjson/buffer/pool.go generated vendored
@ -1,207 +0,0 @@
// Package buffer implements a buffer for serialization, consisting of a chain of []byte-s to
// reduce copying and to allow reuse of individual chunks.
package buffer

import (
	"io"
	"sync"
)

// PoolConfig contains configuration for the allocation and reuse strategy.
type PoolConfig struct {
	StartSize  int // Minimum chunk size that is allocated.
	PooledSize int // Minimum chunk size that is reused, reusing chunks too small will result in overhead.
	MaxSize    int // Maximum chunk size that will be allocated.
}

var config = PoolConfig{
	StartSize:  128,
	PooledSize: 512,
	MaxSize:    32768,
}

// Reuse pool: chunk size -> pool.
var buffers = map[int]*sync.Pool{}

func initBuffers() {
	for l := config.PooledSize; l <= config.MaxSize; l *= 2 {
		buffers[l] = new(sync.Pool)
	}
}

func init() {
	initBuffers()
}

// Init sets up a non-default pooling and allocation strategy. Should be run before serialization is done.
func Init(cfg PoolConfig) {
	config = cfg
	initBuffers()
}

// putBuf puts a chunk to reuse pool if it can be reused.
func putBuf(buf []byte) {
	size := cap(buf)
	if size < config.PooledSize {
		return
	}
	if c := buffers[size]; c != nil {
		c.Put(buf[:0])
	}
}

// getBuf gets a chunk from reuse pool or creates a new one if reuse failed.
func getBuf(size int) []byte {
	if size < config.PooledSize {
		return make([]byte, 0, size)
	}

	if c := buffers[size]; c != nil {
		v := c.Get()
		if v != nil {
			return v.([]byte)
		}
	}
	return make([]byte, 0, size)
}

// Buffer is a buffer optimized for serialization without extra copying.
type Buffer struct {

	// Buf is the current chunk that can be used for serialization.
	Buf []byte

	toPool []byte
	bufs   [][]byte
}

// EnsureSpace makes sure that the current chunk contains at least s free bytes,
// possibly creating a new chunk.
func (b *Buffer) EnsureSpace(s int) {
	if cap(b.Buf)-len(b.Buf) >= s {
		return
	}
	l := len(b.Buf)
	if l > 0 {
		if cap(b.toPool) != cap(b.Buf) {
			// Chunk was reallocated, toPool can be pooled.
			putBuf(b.toPool)
		}
		if cap(b.bufs) == 0 {
			b.bufs = make([][]byte, 0, 8)
		}
		b.bufs = append(b.bufs, b.Buf)
		l = cap(b.toPool) * 2
	} else {
		l = config.StartSize
	}

	if l > config.MaxSize {
		l = config.MaxSize
	}
	b.Buf = getBuf(l)
	b.toPool = b.Buf
}

// AppendByte appends a single byte to buffer.
func (b *Buffer) AppendByte(data byte) {
	if cap(b.Buf) == len(b.Buf) { // EnsureSpace won't be inlined.
		b.EnsureSpace(1)
	}
	b.Buf = append(b.Buf, data)
}

// AppendBytes appends a byte slice to buffer.
func (b *Buffer) AppendBytes(data []byte) {
	for len(data) > 0 {
		if cap(b.Buf) == len(b.Buf) { // EnsureSpace won't be inlined.
			b.EnsureSpace(1)
		}

		sz := cap(b.Buf) - len(b.Buf)
		if sz > len(data) {
			sz = len(data)
		}

		b.Buf = append(b.Buf, data[:sz]...)
		data = data[sz:]
	}
}

// AppendString appends a string to buffer.
func (b *Buffer) AppendString(data string) {
	for len(data) > 0 {
		if cap(b.Buf) == len(b.Buf) { // EnsureSpace won't be inlined.
			b.EnsureSpace(1)
		}

		sz := cap(b.Buf) - len(b.Buf)
		if sz > len(data) {
			sz = len(data)
		}

		b.Buf = append(b.Buf, data[:sz]...)
		data = data[sz:]
	}
}

// Size computes the size of a buffer by adding sizes of every chunk.
func (b *Buffer) Size() int {
	size := len(b.Buf)
	for _, buf := range b.bufs {
		size += len(buf)
	}
	return size
}

// DumpTo outputs the contents of a buffer to a writer and resets the buffer.
func (b *Buffer) DumpTo(w io.Writer) (written int, err error) {
	var n int
	for _, buf := range b.bufs {
		if err == nil {
			n, err = w.Write(buf)
			written += n
		}
		putBuf(buf)
	}

	if err == nil {
		n, err = w.Write(b.Buf)
		written += n
	}
	putBuf(b.toPool)

	b.bufs = nil
	b.Buf = nil
	b.toPool = nil

	return
}

// BuildBytes creates a single byte slice with all the contents of the buffer. Data is
// copied if it does not fit in a single chunk.
func (b *Buffer) BuildBytes() []byte {
	if len(b.bufs) == 0 {

		ret := b.Buf
		b.toPool = nil
		b.Buf = nil

		return ret
	}

	ret := make([]byte, 0, b.Size())
	for _, buf := range b.bufs {
		ret = append(ret, buf...)
		putBuf(buf)
	}

	ret = append(ret, b.Buf...)
	putBuf(b.toPool)

	b.bufs = nil
	b.toPool = nil
	b.Buf = nil

	return ret
}
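The chunk-chaining above is easiest to see from the outside; a minimal sketch of the buffer in use (the loop count is illustrative):

```go
package main

import (
	"fmt"
	"io"

	"github.com/mailru/easyjson/buffer"
)

func main() {
	var b buffer.Buffer

	// Chunks are chained rather than reallocated, so appends never copy
	// previously written data.
	for i := 0; i < 1000; i++ {
		b.AppendString("chunk ")
	}
	fmt.Println("total size:", b.Size())

	// DumpTo streams every chunk to the writer and returns them to the pool.
	if _, err := b.DumpTo(io.Discard); err != nil {
		panic(err)
	}
}
```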
15 vendor/github.com/mailru/easyjson/jlexer/error.go generated vendored
@ -1,15 +0,0 @@
package jlexer

import "fmt"

// LexerError implements the error interface and represents all possible errors that can be
// generated during parsing the JSON data.
type LexerError struct {
	Reason string
	Offset int
	Data   string
}

func (l *LexerError) Error() string {
	return fmt.Sprintf("parse error: %s near offset %d of '%s'", l.Reason, l.Offset, l.Data)
}
956 vendor/github.com/mailru/easyjson/jlexer/lexer.go generated vendored
@ -1,956 +0,0 @@
// Package jlexer contains a JSON lexer implementation.
//
// It is expected that it is mostly used with generated parser code, so the interface is tuned
// for a parser that knows what kind of data is expected.
package jlexer

import (
	"fmt"
	"io"
	"reflect"
	"strconv"
	"unicode/utf8"
	"unsafe"
)

// tokenKind determines type of a token.
type tokenKind byte

const (
	tokenUndef  tokenKind = iota // No token.
	tokenDelim                   // Delimiter: one of '{', '}', '[' or ']'.
	tokenString                  // A string literal, e.g. "abc\u1234"
	tokenNumber                  // Number literal, e.g. 1.5e5
	tokenBool                    // Boolean literal: true or false.
	tokenNull                    // null keyword.
)

// token describes a single token: type, position in the input and value.
type token struct {
	kind tokenKind // Type of a token.

	boolValue  bool   // Value if a boolean literal token.
	byteValue  []byte // Raw value of a token.
	delimValue byte
}

// Lexer is a JSON lexer: it iterates over JSON tokens in a byte slice.
type Lexer struct {
	Data []byte // Input data given to the lexer.

	start int   // Start of the current token.
	pos   int   // Current unscanned position in the input stream.
	token token // Last scanned token, if token.kind != tokenUndef.

	firstElement bool // Whether current element is the first in array or an object.
	wantSep      byte // A comma or a colon character, which needs to occur before a token.

	err error // Error encountered during lexing, if any.
}

// fetchToken scans the input for the next token.
func (r *Lexer) fetchToken() {
	r.token.kind = tokenUndef
	r.start = r.pos

	// Check if r.Data has r.pos element.
	// If it doesn't, the input data is corrupted.
	if len(r.Data) < r.pos {
		r.errParse("Unexpected end of data")
		return
	}
	// Determine the type of a token by skipping whitespace and reading the
	// first character.
	for _, c := range r.Data[r.pos:] {
		switch c {
		case ':', ',':
			if r.wantSep == c {
				r.pos++
				r.start++
				r.wantSep = 0
			} else {
				r.errSyntax()
			}

		case ' ', '\t', '\r', '\n':
			r.pos++
			r.start++

		case '"':
			if r.wantSep != 0 {
				r.errSyntax()
			}

			r.token.kind = tokenString
			r.fetchString()
			return

		case '{', '[':
			if r.wantSep != 0 {
				r.errSyntax()
			}
			r.firstElement = true
			r.token.kind = tokenDelim
			r.token.delimValue = r.Data[r.pos]
			r.pos++
			return

		case '}', ']':
			if !r.firstElement && (r.wantSep != ',') {
				r.errSyntax()
			}
			r.wantSep = 0
			r.token.kind = tokenDelim
			r.token.delimValue = r.Data[r.pos]
			r.pos++
			return

		case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '-':
			if r.wantSep != 0 {
				r.errSyntax()
			}
			r.token.kind = tokenNumber
			r.fetchNumber()
			return

		case 'n':
			if r.wantSep != 0 {
				r.errSyntax()
			}

			r.token.kind = tokenNull
			r.fetchNull()
			return

		case 't':
			if r.wantSep != 0 {
				r.errSyntax()
			}

			r.token.kind = tokenBool
			r.token.boolValue = true
			r.fetchTrue()
			return

		case 'f':
			if r.wantSep != 0 {
				r.errSyntax()
			}

			r.token.kind = tokenBool
			r.token.boolValue = false
			r.fetchFalse()
			return

		default:
			r.errSyntax()
			return
		}
	}
	r.err = io.EOF
	return
}

// isTokenEnd returns true if the char can follow a non-delimiter token
func isTokenEnd(c byte) bool {
	return c == ' ' || c == '\t' || c == '\r' || c == '\n' || c == '[' || c == ']' || c == '{' || c == '}' || c == ',' || c == ':'
}

// fetchNull fetches and checks remaining bytes of null keyword.
func (r *Lexer) fetchNull() {
	r.pos += 4
	if r.pos > len(r.Data) ||
		r.Data[r.pos-3] != 'u' ||
		r.Data[r.pos-2] != 'l' ||
		r.Data[r.pos-1] != 'l' ||
		(r.pos != len(r.Data) && !isTokenEnd(r.Data[r.pos])) {

		r.pos -= 4
		r.errSyntax()
	}
}

// fetchTrue fetches and checks remaining bytes of true keyword.
func (r *Lexer) fetchTrue() {
	r.pos += 4
	if r.pos > len(r.Data) ||
		r.Data[r.pos-3] != 'r' ||
		r.Data[r.pos-2] != 'u' ||
		r.Data[r.pos-1] != 'e' ||
		(r.pos != len(r.Data) && !isTokenEnd(r.Data[r.pos])) {

		r.pos -= 4
		r.errSyntax()
	}
}

// fetchFalse fetches and checks remaining bytes of false keyword.
func (r *Lexer) fetchFalse() {
	r.pos += 5
	if r.pos > len(r.Data) ||
		r.Data[r.pos-4] != 'a' ||
		r.Data[r.pos-3] != 'l' ||
		r.Data[r.pos-2] != 's' ||
		r.Data[r.pos-1] != 'e' ||
		(r.pos != len(r.Data) && !isTokenEnd(r.Data[r.pos])) {

		r.pos -= 5
		r.errSyntax()
	}
}

// bytesToStr creates a string pointing at the slice to avoid copying.
//
// Warning: the string returned by the function should be used with care, as the whole input data
// chunk may be either blocked from being freed by GC because of a single string or the buffer.Data
// may be garbage-collected even when the string exists.
func bytesToStr(data []byte) string {
	h := (*reflect.SliceHeader)(unsafe.Pointer(&data))
	shdr := reflect.StringHeader{h.Data, h.Len}
	return *(*string)(unsafe.Pointer(&shdr))
}

// fetchNumber scans a number literal token.
func (r *Lexer) fetchNumber() {
	hasE := false
	afterE := false
	hasDot := false

	r.pos++
	for i, c := range r.Data[r.pos:] {
		switch {
		case c >= '0' && c <= '9':
			afterE = false
		case c == '.' && !hasDot:
			hasDot = true
		case (c == 'e' || c == 'E') && !hasE:
			hasE = true
			hasDot = true
			afterE = true
		case (c == '+' || c == '-') && afterE:
			afterE = false
		default:
			r.pos += i
			if !isTokenEnd(c) {
				r.errSyntax()
			} else {
				r.token.byteValue = r.Data[r.start:r.pos]
			}
			return
		}
	}

	r.pos = len(r.Data)
	r.token.byteValue = r.Data[r.start:]
}

// findStringLen tries to scan into the string literal for ending quote char to determine required size.
// The size will be exact if no escapes are present and may be inexact if there are escaped chars.
func findStringLen(data []byte) (hasEscapes bool, length int) {
	delta := 0

	for i := 0; i < len(data); i++ {
		switch data[i] {
		case '\\':
			i++
			delta++
			if i < len(data) && data[i] == 'u' {
				delta++
			}
		case '"':
			return (delta > 0), (i - delta)
		}
	}

	return false, len(data)
}

// processEscape processes a single escape sequence and returns number of bytes processed.
func (r *Lexer) processEscape(data []byte) (int, error) {
	if len(data) < 2 {
		return 0, fmt.Errorf("syntax error at %v", string(data))
	}

	c := data[1]
	switch c {
	case '"', '/', '\\':
		r.token.byteValue = append(r.token.byteValue, c)
		return 2, nil
	case 'b':
		r.token.byteValue = append(r.token.byteValue, '\b')
		return 2, nil
	case 'f':
		r.token.byteValue = append(r.token.byteValue, '\f')
		return 2, nil
	case 'n':
		r.token.byteValue = append(r.token.byteValue, '\n')
		return 2, nil
	case 'r':
		r.token.byteValue = append(r.token.byteValue, '\r')
		return 2, nil
	case 't':
		r.token.byteValue = append(r.token.byteValue, '\t')
		return 2, nil
	case 'u':
	default:
		return 0, fmt.Errorf("syntax error")
	}

	var val rune

	for i := 2; i < len(data) && i < 6; i++ {
		var v byte
		c = data[i]
		switch c {
		case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
			v = c - '0'
		case 'a', 'b', 'c', 'd', 'e', 'f':
			v = c - 'a' + 10
		case 'A', 'B', 'C', 'D', 'E', 'F':
			v = c - 'A' + 10
		default:
			return 0, fmt.Errorf("syntax error")
		}

		val <<= 4
		val |= rune(v)
	}

	l := utf8.RuneLen(val)
	if l == -1 {
		return 0, fmt.Errorf("invalid unicode escape")
	}

	var d [4]byte
	utf8.EncodeRune(d[:], val)
	r.token.byteValue = append(r.token.byteValue, d[:l]...)
	return 6, nil
}

// fetchString scans a string literal token.
func (r *Lexer) fetchString() {
	r.pos++
	data := r.Data[r.pos:]

	hasEscapes, length := findStringLen(data)
	if !hasEscapes {
		r.token.byteValue = data[:length]
		r.pos += length + 1
		return
	}

	r.token.byteValue = make([]byte, 0, length)
	p := 0
	for i := 0; i < len(data); {
		switch data[i] {
		case '"':
			r.pos += i + 1
			r.token.byteValue = append(r.token.byteValue, data[p:i]...)
			i++
			return

		case '\\':
			r.token.byteValue = append(r.token.byteValue, data[p:i]...)
			off, err := r.processEscape(data[i:])
			if err != nil {
				r.errParse(err.Error())
				return
			}
			i += off
			p = i

		default:
			i++
		}
	}
	r.errParse("unterminated string literal")
}

// scanToken scans the next token if no token is currently available in the lexer.
func (r *Lexer) scanToken() {
	if r.token.kind != tokenUndef || r.err != nil {
		return
	}

	r.fetchToken()
}

// consume resets the current token to allow scanning the next one.
func (r *Lexer) consume() {
	r.token.kind = tokenUndef
	r.token.delimValue = 0
}

// Ok returns true if no error (including io.EOF) was encountered during scanning.
func (r *Lexer) Ok() bool {
	return r.err == nil
}

const maxErrorContextLen = 13

func (r *Lexer) errParse(what string) {
	if r.err == nil {
		var str string
		if len(r.Data)-r.pos <= maxErrorContextLen {
			str = string(r.Data)
		} else {
			str = string(r.Data[r.pos:r.pos+maxErrorContextLen-3]) + "..."
		}
		r.err = &LexerError{
			Reason: what,
			Offset: r.pos,
			Data:   str,
		}
	}
}

func (r *Lexer) errSyntax() {
	r.errParse("syntax error")
}

func (r *Lexer) errInvalidToken(expected string) {
	if r.err == nil {
		var str string
		if len(r.token.byteValue) <= maxErrorContextLen {
			str = string(r.token.byteValue)
		} else {
			str = string(r.token.byteValue[:maxErrorContextLen-3]) + "..."
		}
		r.err = &LexerError{
			Reason: fmt.Sprintf("expected %s", expected),
			Offset: r.pos,
			Data:   str,
		}
	}
}

// Delim consumes a token and verifies that it is the given delimiter.
func (r *Lexer) Delim(c byte) {
	if r.token.kind == tokenUndef && r.Ok() {
		r.fetchToken()
	}
	if !r.Ok() || r.token.delimValue != c {
		r.errInvalidToken(string([]byte{c}))
	}
	r.consume()
}

// IsDelim returns true if there was no scanning error and next token is the given delimiter.
func (r *Lexer) IsDelim(c byte) bool {
	if r.token.kind == tokenUndef && r.Ok() {
		r.fetchToken()
	}
	return !r.Ok() || r.token.delimValue == c
}

// Null verifies that the next token is null and consumes it.
func (r *Lexer) Null() {
	if r.token.kind == tokenUndef && r.Ok() {
		r.fetchToken()
	}
	if !r.Ok() || r.token.kind != tokenNull {
		r.errInvalidToken("null")
	}
	r.consume()
}

// IsNull returns true if the next token is a null keyword.
func (r *Lexer) IsNull() bool {
	if r.token.kind == tokenUndef && r.Ok() {
		r.fetchToken()
	}
	return r.Ok() && r.token.kind == tokenNull
}

// Skip skips a single token.
func (r *Lexer) Skip() {
	if r.token.kind == tokenUndef && r.Ok() {
		r.fetchToken()
	}
	r.consume()
}

// SkipRecursive skips next array or object completely, or just skips a single token if not
// an array/object.
//
// Note: no syntax validation is performed on the skipped data.
func (r *Lexer) SkipRecursive() {
	r.scanToken()

	var start, end byte

	if r.token.delimValue == '{' {
		start, end = '{', '}'
	} else if r.token.delimValue == '[' {
		start, end = '[', ']'
	} else {
		r.consume()
		return
	}

	r.consume()

	level := 1
	inQuotes := false
	wasEscape := false

	for i, c := range r.Data[r.pos:] {
		switch {
		case c == start && !inQuotes:
			level++
		case c == end && !inQuotes:
			level--
			if level == 0 {
				r.pos += i + 1
				return
			}
		case c == '\\' && inQuotes:
			wasEscape = true
			continue
		case c == '"' && inQuotes:
			inQuotes = wasEscape
		case c == '"':
			inQuotes = true
		}
		wasEscape = false
	}
	r.pos = len(r.Data)
	r.err = io.EOF
}

// Raw fetches the next item recursively as a data slice
func (r *Lexer) Raw() []byte {
	r.SkipRecursive()
	if !r.Ok() {
		return nil
	}
	return r.Data[r.start:r.pos]
}

// UnsafeString returns the string value if the token is a string literal.
//
// Warning: returned string may point to the input buffer, so the string should not outlive
// the input buffer. Intended pattern of usage is as an argument to a switch statement.
func (r *Lexer) UnsafeString() string {
	if r.token.kind == tokenUndef && r.Ok() {
		r.fetchToken()
	}
	if !r.Ok() || r.token.kind != tokenString {
		r.errInvalidToken("string")
		return ""
	}

	ret := bytesToStr(r.token.byteValue)
	r.consume()
	return ret
}

// String reads a string literal.
func (r *Lexer) String() string {
	if r.token.kind == tokenUndef && r.Ok() {
		r.fetchToken()
	}
	if !r.Ok() || r.token.kind != tokenString {
		r.errInvalidToken("string")
		return ""
	}
	ret := string(r.token.byteValue)
	r.consume()
	return ret
}

// Bool reads a true or false boolean keyword.
func (r *Lexer) Bool() bool {
	if r.token.kind == tokenUndef && r.Ok() {
		r.fetchToken()
	}
	if !r.Ok() || r.token.kind != tokenBool {
		r.errInvalidToken("bool")
		return false
	}
	ret := r.token.boolValue
	r.consume()
	return ret
}

func (r *Lexer) number() string {
	if r.token.kind == tokenUndef && r.Ok() {
		r.fetchToken()
	}
	if !r.Ok() || r.token.kind != tokenNumber {
		r.errInvalidToken("number")
		return ""
	}
	ret := bytesToStr(r.token.byteValue)
	r.consume()
	return ret
}

func (r *Lexer) Uint8() uint8 {
	s := r.number()
	if !r.Ok() {
		return 0
	}

	n, err := strconv.ParseUint(s, 10, 8)
	if err != nil {
		r.err = &LexerError{
			Reason: err.Error(),
		}
	}
	return uint8(n)
}

func (r *Lexer) Uint16() uint16 {
	s := r.number()
	if !r.Ok() {
		return 0
	}

	n, err := strconv.ParseUint(s, 10, 16)
	if err != nil {
		r.err = &LexerError{
			Reason: err.Error(),
		}
	}
	return uint16(n)
}

func (r *Lexer) Uint32() uint32 {
	s := r.number()
	if !r.Ok() {
		return 0
	}

	n, err := strconv.ParseUint(s, 10, 32)
	if err != nil {
		r.err = &LexerError{
			Reason: err.Error(),
		}
	}
	return uint32(n)
}

func (r *Lexer) Uint64() uint64 {
	s := r.number()
	if !r.Ok() {
		return 0
	}

	n, err := strconv.ParseUint(s, 10, 64)
	if err != nil {
		r.err = &LexerError{
			Reason: err.Error(),
		}
	}
	return n
}

func (r *Lexer) Uint() uint {
	return uint(r.Uint64())
}

func (r *Lexer) Int8() int8 {
	s := r.number()
	if !r.Ok() {
		return 0
	}

	n, err := strconv.ParseInt(s, 10, 8)
	if err != nil {
		r.err = &LexerError{
			Reason: err.Error(),
		}
	}
	return int8(n)
}

func (r *Lexer) Int16() int16 {
	s := r.number()
	if !r.Ok() {
		return 0
	}

	n, err := strconv.ParseInt(s, 10, 16)
	if err != nil {
		r.err = &LexerError{
			Reason: err.Error(),
		}
	}
	return int16(n)
}

func (r *Lexer) Int32() int32 {
	s := r.number()
	if !r.Ok() {
		return 0
	}

	n, err := strconv.ParseInt(s, 10, 32)
	if err != nil {
		r.err = &LexerError{
			Reason: err.Error(),
		}
	}
	return int32(n)
}

func (r *Lexer) Int64() int64 {
	s := r.number()
	if !r.Ok() {
		return 0
	}

	n, err := strconv.ParseInt(s, 10, 64)
	if err != nil {
		r.err = &LexerError{
			Reason: err.Error(),
		}
	}
	return n
}

func (r *Lexer) Int() int {
	return int(r.Int64())
}

func (r *Lexer) Uint8Str() uint8 {
	s := r.UnsafeString()
	if !r.Ok() {
		return 0
	}

	n, err := strconv.ParseUint(s, 10, 8)
	if err != nil {
		r.err = &LexerError{
			Reason: err.Error(),
		}
	}
	return uint8(n)
}

func (r *Lexer) Uint16Str() uint16 {
	s := r.UnsafeString()
	if !r.Ok() {
		return 0
	}

	n, err := strconv.ParseUint(s, 10, 16)
	if err != nil {
		r.err = &LexerError{
			Reason: err.Error(),
		}
	}
	return uint16(n)
}

func (r *Lexer) Uint32Str() uint32 {
	s := r.UnsafeString()
	if !r.Ok() {
		return 0
	}

	n, err := strconv.ParseUint(s, 10, 32)
	if err != nil {
		r.err = &LexerError{
			Reason: err.Error(),
		}
	}
	return uint32(n)
}

func (r *Lexer) Uint64Str() uint64 {
	s := r.UnsafeString()
	if !r.Ok() {
		return 0
	}

	n, err := strconv.ParseUint(s, 10, 64)
	if err != nil {
		r.err = &LexerError{
			Reason: err.Error(),
		}
	}
	return n
}

func (r *Lexer) UintStr() uint {
	return uint(r.Uint64Str())
}

func (r *Lexer) Int8Str() int8 {
	s := r.UnsafeString()
	if !r.Ok() {
		return 0
	}

	n, err := strconv.ParseInt(s, 10, 8)
	if err != nil {
		r.err = &LexerError{
			Reason: err.Error(),
		}
	}
	return int8(n)
}

func (r *Lexer) Int16Str() int16 {
	s := r.UnsafeString()
	if !r.Ok() {
		return 0
	}

	n, err := strconv.ParseInt(s, 10, 16)
	if err != nil {
		r.err = &LexerError{
			Reason: err.Error(),
		}
	}
	return int16(n)
}

func (r *Lexer) Int32Str() int32 {
	s := r.UnsafeString()
	if !r.Ok() {
		return 0
	}

	n, err := strconv.ParseInt(s, 10, 32)
	if err != nil {
		r.err = &LexerError{
			Reason: err.Error(),
		}
	}
	return int32(n)
}

func (r *Lexer) Int64Str() int64 {
	s := r.UnsafeString()
	if !r.Ok() {
		return 0
	}

	n, err := strconv.ParseInt(s, 10, 64)
	if err != nil {
		r.err = &LexerError{
			Reason: err.Error(),
		}
	}
	return n
}

func (r *Lexer) IntStr() int {
	return int(r.Int64Str())
}

func (r *Lexer) Float32() float32 {
	s := r.number()
	if !r.Ok() {
		return 0
	}

	n, err := strconv.ParseFloat(s, 32)
	if err != nil {
		r.err = &LexerError{
			Reason: err.Error(),
		}
	}
	return float32(n)
}

func (r *Lexer) Float64() float64 {
	s := r.number()
	if !r.Ok() {
		return 0
	}

	n, err := strconv.ParseFloat(s, 64)
	if err != nil {
		r.err = &LexerError{
			Reason: err.Error(),
		}
	}
	return n
}

func (r *Lexer) Error() error {
	return r.err
}

func (r *Lexer) AddError(e error) {
	if r.err == nil {
		r.err = e
	}
}

// Interface fetches an interface{} analogous to the 'encoding/json' package.
func (r *Lexer) Interface() interface{} {
	if r.token.kind == tokenUndef && r.Ok() {
		r.fetchToken()
	}

	if !r.Ok() {
		return nil
	}
	switch r.token.kind {
	case tokenString:
		return r.String()
	case tokenNumber:
		return r.Float64()
	case tokenBool:
		return r.Bool()
	case tokenNull:
		r.Null()
		return nil
	}

	if r.token.delimValue == '{' {
		r.consume()

		ret := map[string]interface{}{}
		for !r.IsDelim('}') {
			key := r.String()
			r.WantColon()
			ret[key] = r.Interface()
			r.WantComma()
		}
		r.Delim('}')

		if r.Ok() {
			return ret
		} else {
			return nil
		}
	} else if r.token.delimValue == '[' {
		r.consume()

		var ret []interface{}
		for !r.IsDelim(']') {
			ret = append(ret, r.Interface())
			r.WantComma()
		}
		r.Delim(']')

		if r.Ok() {
			return ret
		} else {
			return nil
		}
	}
	r.errSyntax()
	return nil
}

// WantComma requires a comma to be present before fetching next token.
func (r *Lexer) WantComma() {
	r.wantSep = ','
	r.firstElement = false
}

// WantColon requires a colon to be present before fetching next token.
func (r *Lexer) WantColon() {
	r.wantSep = ':'
	r.firstElement = false
}
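The pull-style API above is what generated unmarshallers drive; a minimal hand-rolled sketch over an illustrative JSON document, using only methods defined in the file above:

```go
package main

import (
	"fmt"

	"github.com/mailru/easyjson/jlexer"
)

func main() {
	in := jlexer.Lexer{Data: []byte(`{"name":"gopher","age":12}`)}

	var name string
	var age int

	in.Delim('{')
	for !in.IsDelim('}') {
		// UnsafeString avoids a copy; safe here because the key is only
		// used inside the switch.
		key := in.UnsafeString()
		in.WantColon()
		switch key {
		case "name":
			name = in.String()
		case "age":
			age = in.Int()
		default:
			in.SkipRecursive() // ignore unknown fields
		}
		in.WantComma()
	}
	in.Delim('}')

	fmt.Println(name, age, in.Error())
}
```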
273 vendor/github.com/mailru/easyjson/jwriter/writer.go generated vendored
@ -1,273 +0,0 @@
|
|||||||
// Package jwriter contains a JSON writer.
|
|
||||||
package jwriter
|
|
||||||
|
|
||||||
import (
|
|
||||||
"io"
|
|
||||||
"strconv"
|
|
||||||
"unicode/utf8"
|
|
||||||
|
|
||||||
"github.com/mailru/easyjson/buffer"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Writer is a JSON writer.
|
|
||||||
type Writer struct {
|
|
||||||
Error error
|
|
||||||
Buffer buffer.Buffer
|
|
||||||
}
|
|
||||||
|
|
||||||
// Size returns the size of the data that was written out.
|
|
||||||
func (w *Writer) Size() int {
|
|
||||||
return w.Buffer.Size()
|
|
||||||
}
|
|
||||||
|
|
||||||
// DumpTo outputs the data to given io.Writer, resetting the buffer.
|
|
||||||
func (w *Writer) DumpTo(out io.Writer) (written int, err error) {
|
|
||||||
return w.Buffer.DumpTo(out)
|
|
||||||
}
|
|
||||||
|
|
||||||
// BuildBytes returns writer data as a single byte slice.
|
|
||||||
func (w *Writer) BuildBytes() ([]byte, error) {
|
|
||||||
if w.Error != nil {
|
|
||||||
return nil, w.Error
|
|
||||||
}
|
|
||||||
|
|
||||||
return w.Buffer.BuildBytes(), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// RawByte appends raw binary data to the buffer.
|
|
||||||
func (w *Writer) RawByte(c byte) {
|
|
||||||
w.Buffer.AppendByte(c)
|
|
||||||
}
|
|
||||||
|
|
||||||
// RawByte appends raw binary data to the buffer.
|
|
||||||
func (w *Writer) RawString(s string) {
|
|
||||||
w.Buffer.AppendString(s)
|
|
||||||
}
|
|
||||||
|
|
||||||
// RawByte appends raw binary data to the buffer or sets the error if it is given. Useful for
|
|
||||||
// calling with results of MarshalJSON-like functions.
|
|
||||||
func (w *Writer) Raw(data []byte, err error) {
|
|
||||||
switch {
|
|
||||||
case w.Error != nil:
|
|
||||||
return
|
|
||||||
case err != nil:
|
|
||||||
w.Error = err
|
|
||||||
case len(data) > 0:
|
|
||||||
w.Buffer.AppendBytes(data)
|
|
||||||
default:
|
|
||||||
w.RawString("null")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *Writer) Uint8(n uint8) {
|
|
||||||
w.Buffer.EnsureSpace(3)
|
|
||||||
w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *Writer) Uint16(n uint16) {
|
|
||||||
w.Buffer.EnsureSpace(5)
|
|
||||||
w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *Writer) Uint32(n uint32) {
|
|
||||||
w.Buffer.EnsureSpace(10)
|
|
||||||
w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *Writer) Uint(n uint) {
|
|
||||||
w.Buffer.EnsureSpace(20)
|
|
||||||
w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *Writer) Uint64(n uint64) {
|
|
||||||
w.Buffer.EnsureSpace(20)
|
|
||||||
w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, n, 10)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *Writer) Int8(n int8) {
|
|
||||||
w.Buffer.EnsureSpace(4)
|
|
||||||
w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *Writer) Int16(n int16) {
|
|
||||||
w.Buffer.EnsureSpace(6)
|
|
||||||
w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *Writer) Int32(n int32) {
|
|
||||||
w.Buffer.EnsureSpace(11)
|
|
||||||
w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *Writer) Int(n int) {
|
|
||||||
w.Buffer.EnsureSpace(21)
|
|
||||||
w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *Writer) Int64(n int64) {
|
|
||||||
w.Buffer.EnsureSpace(21)
|
|
||||||
w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, n, 10)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *Writer) Uint8Str(n uint8) {
|
|
||||||
w.Buffer.EnsureSpace(3)
|
|
||||||
w.Buffer.Buf = append(w.Buffer.Buf, '"')
|
|
||||||
w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
|
|
||||||
w.Buffer.Buf = append(w.Buffer.Buf, '"')
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *Writer) Uint16Str(n uint16) {
|
|
||||||
w.Buffer.EnsureSpace(5)
|
|
||||||
w.Buffer.Buf = append(w.Buffer.Buf, '"')
|
|
||||||
w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
|
|
||||||
w.Buffer.Buf = append(w.Buffer.Buf, '"')
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *Writer) Uint32Str(n uint32) {
|
|
||||||
w.Buffer.EnsureSpace(10)
|
|
||||||
w.Buffer.Buf = append(w.Buffer.Buf, '"')
|
|
||||||
w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
|
|
||||||
w.Buffer.Buf = append(w.Buffer.Buf, '"')
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *Writer) UintStr(n uint) {
|
|
||||||
w.Buffer.EnsureSpace(20)
|
|
||||||
w.Buffer.Buf = append(w.Buffer.Buf, '"')
|
|
||||||
w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
|
|
||||||
w.Buffer.Buf = append(w.Buffer.Buf, '"')
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *Writer) Uint64Str(n uint64) {
|
|
||||||
w.Buffer.EnsureSpace(20)
|
|
||||||
w.Buffer.Buf = append(w.Buffer.Buf, '"')
|
|
||||||
w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, n, 10)
|
|
||||||
w.Buffer.Buf = append(w.Buffer.Buf, '"')
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *Writer) Int8Str(n int8) {
|
|
||||||
w.Buffer.EnsureSpace(4)
|
|
||||||
w.Buffer.Buf = append(w.Buffer.Buf, '"')
|
|
||||||
w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10)
|
|
||||||
w.Buffer.Buf = append(w.Buffer.Buf, '"')
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *Writer) Int16Str(n int16) {
|
|
||||||
w.Buffer.EnsureSpace(6)
|
|
||||||
w.Buffer.Buf = append(w.Buffer.Buf, '"')
|
|
||||||
w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10)
|
|
||||||
w.Buffer.Buf = append(w.Buffer.Buf, '"')
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *Writer) Int32Str(n int32) {
|
|
||||||
w.Buffer.EnsureSpace(11)
|
|
||||||
w.Buffer.Buf = append(w.Buffer.Buf, '"')
|
|
||||||
w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10)
|
|
||||||
w.Buffer.Buf = append(w.Buffer.Buf, '"')
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *Writer) IntStr(n int) {
|
|
||||||
w.Buffer.EnsureSpace(21)
|
|
||||||
w.Buffer.Buf = append(w.Buffer.Buf, '"')
|
|
||||||
w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10)
|
|
||||||
w.Buffer.Buf = append(w.Buffer.Buf, '"')
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *Writer) Int64Str(n int64) {
|
|
||||||
w.Buffer.EnsureSpace(21)
|
|
||||||
w.Buffer.Buf = append(w.Buffer.Buf, '"')
|
|
||||||
w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, n, 10)
|
|
||||||
w.Buffer.Buf = append(w.Buffer.Buf, '"')
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *Writer) Float32(n float32) {
|
|
||||||
w.Buffer.EnsureSpace(20)
|
|
||||||
w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, float64(n), 'g', -1, 32)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *Writer) Float64(n float64) {
|
|
||||||
w.Buffer.EnsureSpace(20)
|
|
||||||
w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, n, 'g', -1, 64)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *Writer) Bool(v bool) {
|
|
||||||
w.Buffer.EnsureSpace(5)
|
|
||||||
if v {
|
|
||||||
w.Buffer.Buf = append(w.Buffer.Buf, "true"...)
|
|
||||||
} else {
|
|
||||||
w.Buffer.Buf = append(w.Buffer.Buf, "false"...)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
const chars = "0123456789abcdef"
|
|
||||||
|
|
||||||
func (w *Writer) String(s string) {
|
|
||||||
w.Buffer.AppendByte('"')
|
|
||||||
|
|
||||||
// Portions of the string that contain no escapes are appended as
|
|
||||||
// byte slices.
|
|
||||||
|
|
||||||
p := 0 // last non-escape symbol
|
|
||||||
|
|
||||||
for i := 0; i < len(s); {
|
|
||||||
// single-with character
|
|
||||||
		if c := s[i]; c < utf8.RuneSelf {
			var escape byte
			switch c {
			case '\t':
				escape = 't'
			case '\r':
				escape = 'r'
			case '\n':
				escape = 'n'
			case '\\':
				escape = '\\'
			case '"':
				escape = '"'
			case '<', '>':
				// do nothing
			default:
				if c >= 0x20 {
					// no escaping is required
					i++
					continue
				}
			}
			if escape != 0 {
				w.Buffer.AppendString(s[p:i])
				w.Buffer.AppendByte('\\')
				w.Buffer.AppendByte(escape)
			} else {
				w.Buffer.AppendString(s[p:i])
				w.Buffer.AppendString(`\u00`)
				w.Buffer.AppendByte(chars[c>>4])
				w.Buffer.AppendByte(chars[c&0xf])
			}
			i++
			p = i
			continue
		}

		// broken utf
		runeValue, runeWidth := utf8.DecodeRuneInString(s[i:])
		if runeValue == utf8.RuneError && runeWidth == 1 {
			w.Buffer.AppendString(s[p:i])
			w.Buffer.AppendString(`\ufffd`)
			i++
			p = i
			continue
		}

		// jsonp stuff - line separator and paragraph separator
		if runeValue == '\u2028' || runeValue == '\u2029' {
			w.Buffer.AppendString(s[p:i])
			w.Buffer.AppendString(`\u202`)
			w.Buffer.AppendByte(chars[runeValue&0xf])
			i += runeWidth
			p = i
			continue
		}
		i += runeWidth
	}
	w.Buffer.AppendString(s[p:])
	w.Buffer.AppendByte('"')
}
19
vendor/github.com/peterbourgon/diskv/LICENSE
generated
vendored
@ -1,19 +0,0 @@
Copyright (c) 2011-2012 Peter Bourgon

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
141
vendor/github.com/peterbourgon/diskv/README.md
generated
vendored
@ -1,141 +0,0 @@
# What is diskv?

Diskv (disk-vee) is a simple, persistent key-value store written in the Go
language. It starts with an incredibly simple API for storing arbitrary data on
a filesystem by key, and builds several layers of performance-enhancing
abstraction on top. The end result is a conceptually simple, but highly
performant, disk-backed storage system.

[![Build Status][1]][2]

[1]: https://drone.io/github.com/peterbourgon/diskv/status.png
[2]: https://drone.io/github.com/peterbourgon/diskv/latest


# Installing

Install [Go 1][3], either [from source][4] or [with a prepackaged binary][5].
Then,

```bash
$ go get github.com/peterbourgon/diskv
```

[3]: http://golang.org
[4]: http://golang.org/doc/install/source
[5]: http://golang.org/doc/install


# Usage

```go
package main

import (
	"fmt"
	"github.com/peterbourgon/diskv"
)

func main() {
	// Simplest transform function: put all the data files into the base dir.
	flatTransform := func(s string) []string { return []string{} }

	// Initialize a new diskv store, rooted at "my-data-dir", with a 1MB cache.
	d := diskv.New(diskv.Options{
		BasePath:     "my-data-dir",
		Transform:    flatTransform,
		CacheSizeMax: 1024 * 1024,
	})

	// Write three bytes to the key "alpha".
	key := "alpha"
	d.Write(key, []byte{'1', '2', '3'})

	// Read the value back out of the store.
	value, _ := d.Read(key)
	fmt.Printf("%v\n", value)

	// Erase the key+value from the store (and the disk).
	d.Erase(key)
}
```

More complex examples can be found in the "examples" subdirectory.


# Theory

## Basic idea

At its core, diskv is a map of a key (`string`) to arbitrary data (`[]byte`).
The data is written to a single file on disk, with the same name as the key.
The key determines where that file will be stored, via a user-provided
`TransformFunc`, which takes a key and returns a slice (`[]string`)
corresponding to a path list where the key file will be stored. The simplest
TransformFunc,

```go
func SimpleTransform(key string) []string {
	return []string{}
}
```

will place all keys in the same, base directory. The design is inspired by
[Redis diskstore][6]; a TransformFunc which emulates the default diskstore
behavior is available in the content-addressable-storage example.

[6]: http://groups.google.com/group/redis-db/browse_thread/thread/d444bc786689bde9?pli=1

**Note** that your TransformFunc should ensure that one valid key doesn't
transform to a subset of another valid key. That is, it shouldn't be possible
to construct valid keys that resolve to directory names. As a concrete example,
if your TransformFunc splits on every 3 characters, then

```go
d.Write("abcabc", val) // OK: written to <base>/abc/abc/abcabc
d.Write("abc", val)    // Error: attempted write to <base>/abc/abc, but it's a directory
```

This will be addressed in an upcoming version of diskv.

Probably the most important design principle behind diskv is that your data is
always flatly available on the disk. diskv will never do anything that would
prevent you from accessing, copying, backing up, or otherwise interacting with
your data via common UNIX commandline tools.

## Adding a cache

An in-memory caching layer is provided by combining the BasicStore
functionality with a simple map structure, and keeping it up-to-date as
appropriate. Since the map structure in Go is not threadsafe, it's combined
with a RWMutex to provide safe concurrent access.

## Adding order

diskv is a key-value store and therefore inherently unordered. An ordering
system can be injected into the store by passing something which satisfies the
diskv.Index interface. (A default implementation, using Google's
[btree][7] package, is provided.) Basically, diskv keeps an ordered (by a
user-provided Less function) index of the keys, which can be queried.

[7]: https://github.com/google/btree

## Adding compression

Something which implements the diskv.Compression interface may be passed
during store creation, so that all Writes and Reads are filtered through
a compression/decompression pipeline. Several default implementations,
using stdlib compression algorithms, are provided. Note that data is cached
compressed; the cost of decompression is borne with each Read.

## Streaming

diskv also now provides ReadStream and WriteStream methods, to allow very large
data to be handled efficiently.


# Future plans

* Needs plenty of robust testing: huge datasets, etc...
* More thorough benchmarking
* Your suggestions for use-cases I haven't thought of
64
vendor/github.com/peterbourgon/diskv/compression.go
generated
vendored
@ -1,64 +0,0 @@
package diskv

import (
	"compress/flate"
	"compress/gzip"
	"compress/zlib"
	"io"
)

// Compression is an interface that Diskv uses to implement compression of
// data. Writer takes a destination io.Writer and returns a WriteCloser that
// compresses all data written through it. Reader takes a source io.Reader and
// returns a ReadCloser that decompresses all data read through it. You may
// define these methods on your own type, or use one of the NewCompression
// helpers.
type Compression interface {
	Writer(dst io.Writer) (io.WriteCloser, error)
	Reader(src io.Reader) (io.ReadCloser, error)
}

// NewGzipCompression returns a Gzip-based Compression.
func NewGzipCompression() Compression {
	return NewGzipCompressionLevel(flate.DefaultCompression)
}

// NewGzipCompressionLevel returns a Gzip-based Compression with the given level.
func NewGzipCompressionLevel(level int) Compression {
	return &genericCompression{
		wf: func(w io.Writer) (io.WriteCloser, error) { return gzip.NewWriterLevel(w, level) },
		rf: func(r io.Reader) (io.ReadCloser, error) { return gzip.NewReader(r) },
	}
}

// NewZlibCompression returns a Zlib-based Compression.
func NewZlibCompression() Compression {
	return NewZlibCompressionLevel(flate.DefaultCompression)
}

// NewZlibCompressionLevel returns a Zlib-based Compression with the given level.
func NewZlibCompressionLevel(level int) Compression {
	return NewZlibCompressionLevelDict(level, nil)
}

// NewZlibCompressionLevelDict returns a Zlib-based Compression with the given
// level, based on the given dictionary.
func NewZlibCompressionLevelDict(level int, dict []byte) Compression {
	return &genericCompression{
		func(w io.Writer) (io.WriteCloser, error) { return zlib.NewWriterLevelDict(w, level, dict) },
		func(r io.Reader) (io.ReadCloser, error) { return zlib.NewReaderDict(r, dict) },
	}
}

type genericCompression struct {
	wf func(w io.Writer) (io.WriteCloser, error)
	rf func(r io.Reader) (io.ReadCloser, error)
}

func (g *genericCompression) Writer(dst io.Writer) (io.WriteCloser, error) {
	return g.wf(dst)
}

func (g *genericCompression) Reader(src io.Reader) (io.ReadCloser, error) {
	return g.rf(src)
}
624
vendor/github.com/peterbourgon/diskv/diskv.go
generated
vendored
@ -1,624 +0,0 @@
// Diskv (disk-vee) is a simple, persistent, key-value store.
// It stores all data flatly on the filesystem.

package diskv

import (
	"bytes"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"strings"
	"sync"
	"syscall"
)

const (
	defaultBasePath             = "diskv"
	defaultFilePerm os.FileMode = 0666
	defaultPathPerm os.FileMode = 0777
)

var (
	defaultTransform   = func(s string) []string { return []string{} }
	errCanceled        = errors.New("canceled")
	errEmptyKey        = errors.New("empty key")
	errBadKey          = errors.New("bad key")
	errImportDirectory = errors.New("can't import a directory")
)

// TransformFunction transforms a key into a slice of strings, with each
// element in the slice representing a directory in the file path where the
// key's entry will eventually be stored.
//
// For example, if TransformFunc transforms "abcdef" to ["ab", "cde", "f"],
// the final location of the data file will be <basedir>/ab/cde/f/abcdef
type TransformFunction func(s string) []string

// Options define a set of properties that dictate Diskv behavior.
// All values are optional.
type Options struct {
	BasePath     string
	Transform    TransformFunction
	CacheSizeMax uint64 // bytes
	PathPerm     os.FileMode
	FilePerm     os.FileMode
	// If TempDir is set, it will enable filesystem atomic writes by
	// writing temporary files to that location before being moved
	// to BasePath.
	// Note that TempDir MUST be on the same device/partition as
	// BasePath.
	TempDir string

	Index     Index
	IndexLess LessFunction

	Compression Compression
}

// Diskv implements the Diskv interface. You shouldn't construct Diskv
// structures directly; instead, use the New constructor.
type Diskv struct {
	Options
	mu        sync.RWMutex
	cache     map[string][]byte
	cacheSize uint64
}

// New returns an initialized Diskv structure, ready to use.
// If the path identified by baseDir already contains data,
// it will be accessible, but not yet cached.
func New(o Options) *Diskv {
	if o.BasePath == "" {
		o.BasePath = defaultBasePath
	}
	if o.Transform == nil {
		o.Transform = defaultTransform
	}
	if o.PathPerm == 0 {
		o.PathPerm = defaultPathPerm
	}
	if o.FilePerm == 0 {
		o.FilePerm = defaultFilePerm
	}

	d := &Diskv{
		Options:   o,
		cache:     map[string][]byte{},
		cacheSize: 0,
	}

	if d.Index != nil && d.IndexLess != nil {
		d.Index.Initialize(d.IndexLess, d.Keys(nil))
	}

	return d
}

// Write synchronously writes the key-value pair to disk, making it immediately
// available for reads. Write relies on the filesystem to perform an eventual
// sync to physical media. If you need stronger guarantees, see WriteStream.
func (d *Diskv) Write(key string, val []byte) error {
	return d.WriteStream(key, bytes.NewBuffer(val), false)
}

// WriteStream writes the data represented by the io.Reader to the disk, under
// the provided key. If sync is true, WriteStream performs an explicit sync on
// the file as soon as it's written.
//
// bytes.Buffer provides io.Reader semantics for basic data types.
func (d *Diskv) WriteStream(key string, r io.Reader, sync bool) error {
	if len(key) <= 0 {
		return errEmptyKey
	}

	d.mu.Lock()
	defer d.mu.Unlock()

	return d.writeStreamWithLock(key, r, sync)
}

// createKeyFileWithLock either creates the key file directly, or
// creates a temporary file in TempDir if it is set.
func (d *Diskv) createKeyFileWithLock(key string) (*os.File, error) {
	if d.TempDir != "" {
		if err := os.MkdirAll(d.TempDir, d.PathPerm); err != nil {
			return nil, fmt.Errorf("temp mkdir: %s", err)
		}
		f, err := ioutil.TempFile(d.TempDir, "")
		if err != nil {
			return nil, fmt.Errorf("temp file: %s", err)
		}

		if err := f.Chmod(d.FilePerm); err != nil {
			f.Close()           // error deliberately ignored
			os.Remove(f.Name()) // error deliberately ignored
			return nil, fmt.Errorf("chmod: %s", err)
		}
		return f, nil
	}

	mode := os.O_WRONLY | os.O_CREATE | os.O_TRUNC // overwrite if exists
	f, err := os.OpenFile(d.completeFilename(key), mode, d.FilePerm)
	if err != nil {
		return nil, fmt.Errorf("open file: %s", err)
	}
	return f, nil
}

// writeStream does no input validation checking.
func (d *Diskv) writeStreamWithLock(key string, r io.Reader, sync bool) error {
	if err := d.ensurePathWithLock(key); err != nil {
		return fmt.Errorf("ensure path: %s", err)
	}

	f, err := d.createKeyFileWithLock(key)
	if err != nil {
		return fmt.Errorf("create key file: %s", err)
	}

	wc := io.WriteCloser(&nopWriteCloser{f})
	if d.Compression != nil {
		wc, err = d.Compression.Writer(f)
		if err != nil {
			f.Close()           // error deliberately ignored
			os.Remove(f.Name()) // error deliberately ignored
			return fmt.Errorf("compression writer: %s", err)
		}
	}

	if _, err := io.Copy(wc, r); err != nil {
		f.Close()           // error deliberately ignored
		os.Remove(f.Name()) // error deliberately ignored
		return fmt.Errorf("i/o copy: %s", err)
	}

	if err := wc.Close(); err != nil {
		f.Close()           // error deliberately ignored
		os.Remove(f.Name()) // error deliberately ignored
		return fmt.Errorf("compression close: %s", err)
	}

	if sync {
		if err := f.Sync(); err != nil {
			f.Close()           // error deliberately ignored
			os.Remove(f.Name()) // error deliberately ignored
			return fmt.Errorf("file sync: %s", err)
		}
	}

	if err := f.Close(); err != nil {
		return fmt.Errorf("file close: %s", err)
	}

	if f.Name() != d.completeFilename(key) {
		if err := os.Rename(f.Name(), d.completeFilename(key)); err != nil {
			os.Remove(f.Name()) // error deliberately ignored
			return fmt.Errorf("rename: %s", err)
		}
	}

	if d.Index != nil {
		d.Index.Insert(key)
	}

	d.bustCacheWithLock(key) // cache only on read

	return nil
}

// Import imports the source file into diskv under the destination key. If the
// destination key already exists, it's overwritten. If move is true, the
// source file is removed after a successful import.
func (d *Diskv) Import(srcFilename, dstKey string, move bool) (err error) {
	if dstKey == "" {
		return errEmptyKey
	}

	if fi, err := os.Stat(srcFilename); err != nil {
		return err
	} else if fi.IsDir() {
		return errImportDirectory
	}

	d.mu.Lock()
	defer d.mu.Unlock()

	if err := d.ensurePathWithLock(dstKey); err != nil {
		return fmt.Errorf("ensure path: %s", err)
	}

	if move {
		if err := syscall.Rename(srcFilename, d.completeFilename(dstKey)); err == nil {
			d.bustCacheWithLock(dstKey)
			return nil
		} else if err != syscall.EXDEV {
			// The rename failed for a reason other than src and dst being
			// on different devices, so falling back to a copy won't help.
			return err
		}
	}

	f, err := os.Open(srcFilename)
	if err != nil {
		return err
	}
	defer f.Close()
	err = d.writeStreamWithLock(dstKey, f, false)
	if err == nil && move {
		err = os.Remove(srcFilename)
	}
	return err
}

// Read reads the key and returns the value.
// If the key is available in the cache, Read won't touch the disk.
// If the key is not in the cache, Read will have the side-effect of
// lazily caching the value.
func (d *Diskv) Read(key string) ([]byte, error) {
	rc, err := d.ReadStream(key, false)
	if err != nil {
		return []byte{}, err
	}
	defer rc.Close()
	return ioutil.ReadAll(rc)
}

// ReadStream reads the key and returns the value (data) as an io.ReadCloser.
// If the value is cached from a previous read, and direct is false,
// ReadStream will use the cached value. Otherwise, it will return a handle to
// the file on disk, and cache the data on read.
//
// If direct is true, ReadStream will lazily delete any cached value for the
// key, and return a direct handle to the file on disk.
//
// If compression is enabled, ReadStream taps into the io.Reader stream prior
// to decompression, and caches the compressed data.
func (d *Diskv) ReadStream(key string, direct bool) (io.ReadCloser, error) {
	d.mu.RLock()
	defer d.mu.RUnlock()

	if val, ok := d.cache[key]; ok {
		if !direct {
			buf := bytes.NewBuffer(val)
			if d.Compression != nil {
				return d.Compression.Reader(buf)
			}
			return ioutil.NopCloser(buf), nil
		}

		go func() {
			d.mu.Lock()
			defer d.mu.Unlock()
			d.uncacheWithLock(key, uint64(len(val)))
		}()
	}

	return d.readWithRLock(key)
}

// read ignores the cache, and returns an io.ReadCloser representing the
// decompressed data for the given key, streamed from the disk. Clients should
// acquire a read lock on the Diskv and check the cache themselves before
// calling read.
func (d *Diskv) readWithRLock(key string) (io.ReadCloser, error) {
	filename := d.completeFilename(key)

	fi, err := os.Stat(filename)
	if err != nil {
		return nil, err
	}
	if fi.IsDir() {
		return nil, os.ErrNotExist
	}

	f, err := os.Open(filename)
	if err != nil {
		return nil, err
	}

	var r io.Reader
	if d.CacheSizeMax > 0 {
		r = newSiphon(f, d, key)
	} else {
		r = &closingReader{f}
	}

	var rc = io.ReadCloser(ioutil.NopCloser(r))
	if d.Compression != nil {
		rc, err = d.Compression.Reader(r)
		if err != nil {
			return nil, err
		}
	}

	return rc, nil
}

// closingReader provides a Reader that automatically closes the
// embedded ReadCloser when it reaches EOF
type closingReader struct {
	rc io.ReadCloser
}

func (cr closingReader) Read(p []byte) (int, error) {
	n, err := cr.rc.Read(p)
	if err == io.EOF {
		if closeErr := cr.rc.Close(); closeErr != nil {
			return n, closeErr // close must succeed for Read to succeed
		}
	}
	return n, err
}

// siphon is like a TeeReader: it copies all data read through it to an
// internal buffer, and moves that buffer to the cache at EOF.
type siphon struct {
	f   *os.File
	d   *Diskv
	key string
	buf *bytes.Buffer
}

// newSiphon constructs a siphoning reader that represents the passed file.
// When a successful series of reads ends in an EOF, the siphon will write
// the buffered data to Diskv's cache under the given key.
func newSiphon(f *os.File, d *Diskv, key string) io.Reader {
	return &siphon{
		f:   f,
		d:   d,
		key: key,
		buf: &bytes.Buffer{},
	}
}

// Read implements the io.Reader interface for siphon.
func (s *siphon) Read(p []byte) (int, error) {
	n, err := s.f.Read(p)

	if err == nil {
		return s.buf.Write(p[0:n]) // Write must succeed for Read to succeed
	}

	if err == io.EOF {
		s.d.cacheWithoutLock(s.key, s.buf.Bytes()) // cache may fail
		if closeErr := s.f.Close(); closeErr != nil {
			return n, closeErr // close must succeed for Read to succeed
		}
		return n, err
	}

	return n, err
}

// Erase synchronously erases the given key from the disk and the cache.
func (d *Diskv) Erase(key string) error {
	d.mu.Lock()
	defer d.mu.Unlock()

	d.bustCacheWithLock(key)

	// erase from index
	if d.Index != nil {
		d.Index.Delete(key)
	}

	// erase from disk
	filename := d.completeFilename(key)
	if s, err := os.Stat(filename); err == nil {
		if s.IsDir() {
			return errBadKey
		}
		if err = os.Remove(filename); err != nil {
			return err
		}
	} else {
		// Return err as-is so caller can do os.IsNotExist(err).
		return err
	}

	// clean up and return
	d.pruneDirsWithLock(key)
	return nil
}

// EraseAll will delete all of the data from the store, both in the cache and on
// the disk. Note that EraseAll doesn't distinguish diskv-related data from non-
// diskv-related data. Care should be taken to always specify a diskv base
// directory that is exclusively for diskv data.
func (d *Diskv) EraseAll() error {
	d.mu.Lock()
	defer d.mu.Unlock()
	d.cache = make(map[string][]byte)
	d.cacheSize = 0
	if d.TempDir != "" {
		os.RemoveAll(d.TempDir) // errors ignored
	}
	return os.RemoveAll(d.BasePath)
}

// Has returns true if the given key exists.
func (d *Diskv) Has(key string) bool {
	d.mu.Lock()
	defer d.mu.Unlock()

	if _, ok := d.cache[key]; ok {
		return true
	}

	filename := d.completeFilename(key)
	s, err := os.Stat(filename)
	if err != nil {
		return false
	}
	if s.IsDir() {
		return false
	}

	return true
}

// Keys returns a channel that will yield every key accessible by the store,
// in undefined order. If a cancel channel is provided, closing it will
// terminate and close the keys channel.
func (d *Diskv) Keys(cancel <-chan struct{}) <-chan string {
	return d.KeysPrefix("", cancel)
}

// KeysPrefix returns a channel that will yield every key accessible by the
// store with the given prefix, in undefined order. If a cancel channel is
// provided, closing it will terminate and close the keys channel. If the
// provided prefix is the empty string, all keys will be yielded.
func (d *Diskv) KeysPrefix(prefix string, cancel <-chan struct{}) <-chan string {
	var prepath string
	if prefix == "" {
		prepath = d.BasePath
	} else {
		prepath = d.pathFor(prefix)
	}
	c := make(chan string)
	go func() {
		filepath.Walk(prepath, walker(c, prefix, cancel))
		close(c)
	}()
	return c
}

// walker returns a function which satisfies the filepath.WalkFunc interface.
// It sends every non-directory file entry down the channel c.
func walker(c chan<- string, prefix string, cancel <-chan struct{}) filepath.WalkFunc {
	return func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}

		if info.IsDir() || !strings.HasPrefix(info.Name(), prefix) {
			return nil // "pass"
		}

		select {
		case c <- info.Name():
		case <-cancel:
			return errCanceled
		}

		return nil
	}
}

// pathFor returns the absolute path for location on the filesystem where the
// data for the given key will be stored.
func (d *Diskv) pathFor(key string) string {
	return filepath.Join(d.BasePath, filepath.Join(d.Transform(key)...))
}

// ensurePathWithLock is a helper function that generates all necessary
// directories on the filesystem for the given key.
func (d *Diskv) ensurePathWithLock(key string) error {
	return os.MkdirAll(d.pathFor(key), d.PathPerm)
}

// completeFilename returns the absolute path to the file for the given key.
func (d *Diskv) completeFilename(key string) string {
	return filepath.Join(d.pathFor(key), key)
}

// cacheWithLock attempts to cache the given key-value pair in the store's
// cache. It can fail if the value is larger than the cache's maximum size.
func (d *Diskv) cacheWithLock(key string, val []byte) error {
	valueSize := uint64(len(val))
	if err := d.ensureCacheSpaceWithLock(valueSize); err != nil {
		return fmt.Errorf("%s; not caching", err)
	}

	// be very strict about memory guarantees
	if (d.cacheSize + valueSize) > d.CacheSizeMax {
		panic(fmt.Sprintf("failed to make room for value (%d/%d)", valueSize, d.CacheSizeMax))
	}

	d.cache[key] = val
	d.cacheSize += valueSize
	return nil
}

// cacheWithoutLock acquires the store's (write) mutex and calls cacheWithLock.
func (d *Diskv) cacheWithoutLock(key string, val []byte) error {
	d.mu.Lock()
	defer d.mu.Unlock()
	return d.cacheWithLock(key, val)
}

func (d *Diskv) bustCacheWithLock(key string) {
	if val, ok := d.cache[key]; ok {
		d.uncacheWithLock(key, uint64(len(val)))
	}
}

func (d *Diskv) uncacheWithLock(key string, sz uint64) {
	d.cacheSize -= sz
	delete(d.cache, key)
}

// pruneDirsWithLock deletes empty directories in the path walk leading to the
// key k. Typically this function is called after an Erase is made.
func (d *Diskv) pruneDirsWithLock(key string) error {
	pathlist := d.Transform(key)
	for i := range pathlist {
		dir := filepath.Join(d.BasePath, filepath.Join(pathlist[:len(pathlist)-i]...))

		// thanks to Steven Blenkinsop for this snippet
		switch fi, err := os.Stat(dir); true {
		case err != nil:
			return err
		case !fi.IsDir():
			panic(fmt.Sprintf("corrupt dirstate at %s", dir))
		}

		nlinks, err := filepath.Glob(filepath.Join(dir, "*"))
		if err != nil {
			return err
		} else if len(nlinks) > 0 {
			return nil // has subdirs -- do not prune
		}
		if err = os.Remove(dir); err != nil {
			return err
		}
	}

	return nil
}

// ensureCacheSpaceWithLock deletes entries from the cache in arbitrary order
// until the cache has at least valueSize bytes available.
func (d *Diskv) ensureCacheSpaceWithLock(valueSize uint64) error {
	if valueSize > d.CacheSizeMax {
		return fmt.Errorf("value size (%d bytes) too large for cache (%d bytes)", valueSize, d.CacheSizeMax)
	}

	safe := func() bool { return (d.cacheSize + valueSize) <= d.CacheSizeMax }

	for key, val := range d.cache {
		if safe() {
			break
		}

		d.uncacheWithLock(key, uint64(len(val)))
	}

	if !safe() {
		panic(fmt.Sprintf("%d bytes still won't fit in the cache! (max %d bytes)", valueSize, d.CacheSizeMax))
	}

	return nil
}

// nopWriteCloser wraps an io.Writer and provides a no-op Close method to
// satisfy the io.WriteCloser interface.
type nopWriteCloser struct {
	io.Writer
}

func (wc *nopWriteCloser) Write(p []byte) (int, error) { return wc.Writer.Write(p) }
func (wc *nopWriteCloser) Close() error                { return nil }
115
vendor/github.com/peterbourgon/diskv/index.go
generated
vendored
@ -1,115 +0,0 @@
package diskv

import (
	"sync"

	"github.com/google/btree"
)

// Index is a generic interface for things that can
// provide an ordered list of keys.
type Index interface {
	Initialize(less LessFunction, keys <-chan string)
	Insert(key string)
	Delete(key string)
	Keys(from string, n int) []string
}

// LessFunction is used to initialize an Index of keys in a specific order.
type LessFunction func(string, string) bool

// btreeString is a custom data type that satisfies the BTree Less interface,
// making the strings it wraps sortable by the BTree package.
type btreeString struct {
	s string
	l LessFunction
}

// Less satisfies the BTree.Less interface using the btreeString's LessFunction.
func (s btreeString) Less(i btree.Item) bool {
	return s.l(s.s, i.(btreeString).s)
}

// BTreeIndex is an implementation of the Index interface using google/btree.
type BTreeIndex struct {
	sync.RWMutex
	LessFunction
	*btree.BTree
}

// Initialize populates the BTree tree with data from the keys channel,
// according to the passed less function. It's destructive to the BTreeIndex.
func (i *BTreeIndex) Initialize(less LessFunction, keys <-chan string) {
	i.Lock()
	defer i.Unlock()
	i.LessFunction = less
	i.BTree = rebuild(less, keys)
}

// Insert inserts the given key (only) into the BTree tree.
func (i *BTreeIndex) Insert(key string) {
	i.Lock()
	defer i.Unlock()
	if i.BTree == nil || i.LessFunction == nil {
		panic("uninitialized index")
	}
	i.BTree.ReplaceOrInsert(btreeString{s: key, l: i.LessFunction})
}

// Delete removes the given key (only) from the BTree tree.
func (i *BTreeIndex) Delete(key string) {
	i.Lock()
	defer i.Unlock()
	if i.BTree == nil || i.LessFunction == nil {
		panic("uninitialized index")
	}
	i.BTree.Delete(btreeString{s: key, l: i.LessFunction})
}

// Keys yields a maximum of n keys in order. If the passed 'from' key is empty,
// Keys will return the first n keys. If the passed 'from' key is non-empty, the
// first key in the returned slice will be the key that immediately follows the
// passed key, in key order.
func (i *BTreeIndex) Keys(from string, n int) []string {
	i.RLock()
	defer i.RUnlock()

	if i.BTree == nil || i.LessFunction == nil {
		panic("uninitialized index")
	}

	if i.BTree.Len() <= 0 {
		return []string{}
	}

	btreeFrom := btreeString{s: from, l: i.LessFunction}
	skipFirst := true
	if len(from) <= 0 || !i.BTree.Has(btreeFrom) {
		// no such key, so fabricate an always-smallest item
		btreeFrom = btreeString{s: "", l: func(string, string) bool { return true }}
		skipFirst = false
	}

	keys := []string{}
	iterator := func(i btree.Item) bool {
		keys = append(keys, i.(btreeString).s)
		return len(keys) < n
	}
	i.BTree.AscendGreaterOrEqual(btreeFrom, iterator)

	if skipFirst && len(keys) > 0 {
		keys = keys[1:]
	}

	return keys
}

// rebuild does the work of regenerating the index
// with the given keys.
func rebuild(less LessFunction, keys <-chan string) *btree.BTree {
	tree := btree.New(2)
	for key := range keys {
		tree.ReplaceOrInsert(btreeString{s: key, l: less})
	}
	return tree
}
162
vendor/golang.org/x/text/cases/cases.go
generated
vendored
@ -1,162 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:generate go run gen.go gen_trieval.go

// Package cases provides general and language-specific case mappers.
package cases // import "golang.org/x/text/cases"

import (
	"golang.org/x/text/language"
	"golang.org/x/text/transform"
)

// References:
// - Unicode Reference Manual Chapter 3.13, 4.2, and 5.18.
// - http://www.unicode.org/reports/tr29/
// - http://www.unicode.org/Public/6.3.0/ucd/CaseFolding.txt
// - http://www.unicode.org/Public/6.3.0/ucd/SpecialCasing.txt
// - http://www.unicode.org/Public/6.3.0/ucd/DerivedCoreProperties.txt
// - http://www.unicode.org/Public/6.3.0/ucd/auxiliary/WordBreakProperty.txt
// - http://www.unicode.org/Public/6.3.0/ucd/auxiliary/WordBreakTest.txt
// - http://userguide.icu-project.org/transforms/casemappings

// TODO:
// - Case folding
// - Wide and Narrow?
// - Segmenter option for title casing.
// - ASCII fast paths
// - Encode Soft-Dotted property within trie somehow.

// A Caser transforms given input to a certain case. It implements
// transform.Transformer.
//
// A Caser may be stateful and should therefore not be shared between
// goroutines.
type Caser struct {
	t transform.SpanningTransformer
}

// Bytes returns a new byte slice with the result of converting b to the case
// form implemented by c.
func (c Caser) Bytes(b []byte) []byte {
	b, _, _ = transform.Bytes(c.t, b)
	return b
}

// String returns a string with the result of transforming s to the case form
// implemented by c.
func (c Caser) String(s string) string {
	s, _, _ = transform.String(c.t, s)
	return s
}

// Reset resets the Caser to be reused for new input after a previous call to
// Transform.
func (c Caser) Reset() { c.t.Reset() }

// Transform implements the transform.Transformer interface and transforms the
// given input to the case form implemented by c.
func (c Caser) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
	return c.t.Transform(dst, src, atEOF)
}

// Span implements the transform.SpanningTransformer interface.
func (c Caser) Span(src []byte, atEOF bool) (n int, err error) {
	return c.t.Span(src, atEOF)
}

// Upper returns a Caser for language-specific uppercasing.
func Upper(t language.Tag, opts ...Option) Caser {
	return Caser{makeUpper(t, getOpts(opts...))}
}

// Lower returns a Caser for language-specific lowercasing.
func Lower(t language.Tag, opts ...Option) Caser {
	return Caser{makeLower(t, getOpts(opts...))}
}

// Title returns a Caser for language-specific title casing. It uses an
// approximation of the default Unicode Word Break algorithm.
func Title(t language.Tag, opts ...Option) Caser {
	return Caser{makeTitle(t, getOpts(opts...))}
}

// Fold returns a Caser that implements Unicode case folding. The returned Caser
// is stateless and safe to use concurrently by multiple goroutines.
//
// Case folding does not normalize the input and may not preserve a normal form.
// Use the collate or search package for more convenient and linguistically
// sound comparisons. Use golang.org/x/text/secure/precis for string comparisons
// where security aspects are a concern.
func Fold(opts ...Option) Caser {
	return Caser{makeFold(getOpts(opts...))}
}

// An Option is used to modify the behavior of a Caser.
type Option func(o options) options

// TODO: consider these options to take a boolean as well, like FinalSigma.
// The advantage of using this approach is that other providers of a lower-case
// algorithm could set different defaults by prefixing a user-provided slice
// of options with their own. This is handy, for instance, for the precis
// package which would override the default to not handle the Greek final sigma.

var (
	// NoLower disables the lowercasing of non-leading letters for a title
	// caser.
	NoLower Option = noLower

	// Compact omits mappings in case folding for characters that would grow the
	// input. (Unimplemented.)
	Compact Option = compact
)

// TODO: option to preserve a normal form, if applicable?

type options struct {
	noLower bool
	simple  bool

	// TODO: segmenter, max ignorable, alternative versions, etc.

	ignoreFinalSigma bool
}

func getOpts(o ...Option) (res options) {
	for _, f := range o {
		res = f(res)
	}
	return
}

func noLower(o options) options {
	o.noLower = true
	return o
}

func compact(o options) options {
	o.simple = true
	return o
}

// HandleFinalSigma specifies whether the special handling of Greek final sigma
// should be enabled. Unicode prescribes handling the Greek final sigma for all
// locales, but standards like IDNA and PRECIS override this default.
func HandleFinalSigma(enable bool) Option {
	if enable {
		return handleFinalSigma
	}
	return ignoreFinalSigma
}

func ignoreFinalSigma(o options) options {
	o.ignoreFinalSigma = true
	return o
}

func handleFinalSigma(o options) options {
	o.ignoreFinalSigma = false
	return o
}
376
vendor/golang.org/x/text/cases/context.go
generated
vendored
@ -1,376 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package cases

import "golang.org/x/text/transform"

// A context is used for iterating over source bytes, fetching case info and
// writing to a destination buffer.
//
// Casing operations may need more than one rune of context to decide how a rune
// should be cased. Casing implementations should call checkpoint on context
// whenever it is known to be safe to return the runes processed so far.
//
// It is recommended for implementations to not allow for more than 30 case
// ignorables as lookahead (analogous to the limit in norm) and to use state if
// unbounded lookahead is needed for cased runes.
type context struct {
	dst, src []byte
	atEOF    bool

	pDst int // pDst points past the last written rune in dst.
	pSrc int // pSrc points to the start of the currently scanned rune.

	// checkpoints safe to return in Transform, where nDst <= pDst and nSrc <= pSrc.
	nDst, nSrc int
	err        error

	sz   int  // size of current rune
	info info // case information of currently scanned rune

	// State preserved across calls to Transform.
	isMidWord bool // false if next cased letter needs to be title-cased.
}

func (c *context) Reset() {
	c.isMidWord = false
}

// ret returns the return values for the Transform method. It checks whether
// there were insufficient bytes in src to complete and introduces an error
// accordingly, if necessary.
func (c *context) ret() (nDst, nSrc int, err error) {
	if c.err != nil || c.nSrc == len(c.src) {
		return c.nDst, c.nSrc, c.err
	}
	// This point is only reached by mappers if there was no short destination
	// buffer. This means that the source buffer was exhausted and that c.sz was
	// set to 0 by next.
	if c.atEOF && c.pSrc == len(c.src) {
		return c.pDst, c.pSrc, nil
	}
	return c.nDst, c.nSrc, transform.ErrShortSrc
}

// retSpan returns the return values for the Span method. It checks whether
// there were insufficient bytes in src to complete and introduces an error
// accordingly, if necessary.
func (c *context) retSpan() (n int, err error) {
	_, nSrc, err := c.ret()
	return nSrc, err
}

// checkpoint sets the return value buffer points for Transform to the current
// positions.
func (c *context) checkpoint() {
	if c.err == nil {
		c.nDst, c.nSrc = c.pDst, c.pSrc+c.sz
	}
}

// unreadRune causes the last rune read by next to be reread on the next
// invocation of next. Only one unreadRune may be called after a call to next.
func (c *context) unreadRune() {
	c.sz = 0
}

func (c *context) next() bool {
	c.pSrc += c.sz
	if c.pSrc == len(c.src) || c.err != nil {
		c.info, c.sz = 0, 0
		return false
	}
	v, sz := trie.lookup(c.src[c.pSrc:])
	c.info, c.sz = info(v), sz
	if c.sz == 0 {
		if c.atEOF {
			// A zero size means we have an incomplete rune. If we are atEOF,
			// this means it is an illegal rune, which we will consume one
			// byte at a time.
			c.sz = 1
		} else {
			c.err = transform.ErrShortSrc
			return false
		}
	}
	return true
}

// writeBytes adds bytes to dst.
func (c *context) writeBytes(b []byte) bool {
	if len(c.dst)-c.pDst < len(b) {
		c.err = transform.ErrShortDst
		return false
	}
	// This loop is faster than using copy.
	for _, ch := range b {
		c.dst[c.pDst] = ch
		c.pDst++
	}
	return true
}

// writeString writes the given string to dst.
func (c *context) writeString(s string) bool {
	if len(c.dst)-c.pDst < len(s) {
		c.err = transform.ErrShortDst
		return false
	}
	// This loop is faster than using copy.
	for i := 0; i < len(s); i++ {
		c.dst[c.pDst] = s[i]
		c.pDst++
	}
	return true
}

// copy writes the current rune to dst.
func (c *context) copy() bool {
	return c.writeBytes(c.src[c.pSrc : c.pSrc+c.sz])
}

// copyXOR copies the current rune to dst and modifies it by applying the XOR
// pattern of the case info. It is the responsibility of the caller to ensure
// that this is a rune with a XOR pattern defined.
func (c *context) copyXOR() bool {
	if !c.copy() {
		return false
	}
	if c.info&xorIndexBit == 0 {
		// Fast path for 6-bit XOR pattern, which covers most cases.
		c.dst[c.pDst-1] ^= byte(c.info >> xorShift)
	} else {
		// Interpret XOR bits as an index.
		// TODO: test performance for unrolling this loop. Verify that we have
		// at least two bytes and at most three.
		idx := c.info >> xorShift
		for p := c.pDst - 1; ; p-- {
			c.dst[p] ^= xorData[idx]
			idx--
			if xorData[idx] == 0 {
				break
			}
		}
	}
	return true
}

// hasPrefix returns true if src[pSrc:] starts with the given string.
func (c *context) hasPrefix(s string) bool {
	b := c.src[c.pSrc:]
	if len(b) < len(s) {
		return false
	}
	for i, c := range b[:len(s)] {
		if c != s[i] {
			return false
		}
	}
	return true
}

// caseType returns an info with only the case bits, normalized to either
// cLower, cUpper, cTitle or cUncased.
func (c *context) caseType() info {
	cm := c.info & 0x7
	if cm < 4 {
		return cm
	}
	if cm >= cXORCase {
		// xor the last bit of the rune with the case type bits.
		b := c.src[c.pSrc+c.sz-1]
		return info(b&1) ^ cm&0x3
	}
	if cm == cIgnorableCased {
		return cLower
	}
	return cUncased
}

// lower writes the lowercase version of the current rune to dst.
func lower(c *context) bool {
	ct := c.caseType()
	if c.info&hasMappingMask == 0 || ct == cLower {
		return c.copy()
	}
	if c.info&exceptionBit == 0 {
		return c.copyXOR()
	}
	e := exceptions[c.info>>exceptionShift:]
	offset := 2 + e[0]&lengthMask // size of header + fold string
	if nLower := (e[1] >> lengthBits) & lengthMask; nLower != noChange {
		return c.writeString(e[offset : offset+nLower])
	}
	return c.copy()
}

func isLower(c *context) bool {
	ct := c.caseType()
	if c.info&hasMappingMask == 0 || ct == cLower {
		return true
	}
	if c.info&exceptionBit == 0 {
		c.err = transform.ErrEndOfSpan
		return false
	}
	e := exceptions[c.info>>exceptionShift:]
	if nLower := (e[1] >> lengthBits) & lengthMask; nLower != noChange {
		c.err = transform.ErrEndOfSpan
		return false
	}
	return true
}

// upper writes the uppercase version of the current rune to dst.
func upper(c *context) bool {
	ct := c.caseType()
	if c.info&hasMappingMask == 0 || ct == cUpper {
		return c.copy()
	}
	if c.info&exceptionBit == 0 {
		return c.copyXOR()
	}
	e := exceptions[c.info>>exceptionShift:]
	offset := 2 + e[0]&lengthMask // size of header + fold string
	// Get length of first special case mapping.
	n := (e[1] >> lengthBits) & lengthMask
	if ct == cTitle {
		// The first special case mapping is for lower. Set n to the second.
		if n == noChange {
			n = 0
		}
		n, e = e[1]&lengthMask, e[n:]
	}
	if n != noChange {
		return c.writeString(e[offset : offset+n])
	}
	return c.copy()
}

// isUpper reports whether the current rune is in upper case.
func isUpper(c *context) bool {
	ct := c.caseType()
	if c.info&hasMappingMask == 0 || ct == cUpper {
		return true
	}
	if c.info&exceptionBit == 0 {
		c.err = transform.ErrEndOfSpan
		return false
	}
	e := exceptions[c.info>>exceptionShift:]
	// Get length of first special case mapping.
	n := (e[1] >> lengthBits) & lengthMask
	if ct == cTitle {
		n = e[1] & lengthMask
	}
	if n != noChange {
		c.err = transform.ErrEndOfSpan
		return false
	}
	return true
}

// title writes the title case version of the current rune to dst.
func title(c *context) bool {
	ct := c.caseType()
	if c.info&hasMappingMask == 0 || ct == cTitle {
		return c.copy()
	}
	if c.info&exceptionBit == 0 {
		if ct == cLower {
			return c.copyXOR()
		}
		return c.copy()
	}
	// Get the exception data.
	e := exceptions[c.info>>exceptionShift:]
	offset := 2 + e[0]&lengthMask // size of header + fold string

	nFirst := (e[1] >> lengthBits) & lengthMask
	if nTitle := e[1] & lengthMask; nTitle != noChange {
		if nFirst != noChange {
			e = e[nFirst:]
		}
		return c.writeString(e[offset : offset+nTitle])
	}
	if ct == cLower && nFirst != noChange {
		// Use the uppercase version instead.
		return c.writeString(e[offset : offset+nFirst])
	}
	// Already in correct case.
	return c.copy()
}

// isTitle reports whether the current rune is in title case.
func isTitle(c *context) bool {
	ct := c.caseType()
	if c.info&hasMappingMask == 0 || ct == cTitle {
		return true
	}
	if c.info&exceptionBit == 0 {
		if ct == cLower {
			c.err = transform.ErrEndOfSpan
			return false
		}
		return true
	}
	// Get the exception data.
	e := exceptions[c.info>>exceptionShift:]
	if nTitle := e[1] & lengthMask; nTitle != noChange {
		c.err = transform.ErrEndOfSpan
		return false
	}
	nFirst := (e[1] >> lengthBits) & lengthMask
	if ct == cLower && nFirst != noChange {
		c.err = transform.ErrEndOfSpan
		return false
	}
	return true
}

// foldFull writes the foldFull version of the current rune to dst.
func foldFull(c *context) bool {
	if c.info&hasMappingMask == 0 {
		return c.copy()
	}
	ct := c.caseType()
	if c.info&exceptionBit == 0 {
		if ct != cLower || c.info&inverseFoldBit != 0 {
			return c.copyXOR()
		}
		return c.copy()
	}
	e := exceptions[c.info>>exceptionShift:]
	n := e[0] & lengthMask
	if n == 0 {
		if ct == cLower {
			return c.copy()
		}
		n = (e[1] >> lengthBits) & lengthMask
	}
	return c.writeString(e[2 : 2+n])
}

// isFoldFull reports whether the current run is mapped to foldFull
|
|
||||||
func isFoldFull(c *context) bool {
|
|
||||||
if c.info&hasMappingMask == 0 {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
ct := c.caseType()
|
|
||||||
if c.info&exceptionBit == 0 {
|
|
||||||
if ct != cLower || c.info&inverseFoldBit != 0 {
|
|
||||||
c.err = transform.ErrEndOfSpan
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
e := exceptions[c.info>>exceptionShift:]
|
|
||||||
n := e[0] & lengthMask
|
|
||||||
if n == 0 && ct == cLower {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
c.err = transform.ErrEndOfSpan
|
|
||||||
return false
|
|
||||||
}
|
|
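All of the mapping helpers above decode exception entries the same way: two header bytes (fold length in byte 0, lengths of the first and second case mappings packed in byte 1) followed by the mapping strings. A minimal standalone sketch of that decoding, using the constants documented in trieval.go further down and a hypothetical hand-built entry rather than the real generated tables:

package main

import "fmt"

const (
	lengthMask = 0x07 // low three bits hold a length
	lengthBits = 3    // shift for the 1st-mapping length in byte 1
)

// decode pulls the fold string and the first case mapping out of a
// hypothetical exception entry laid out as documented in trieval.go.
func decode(e []byte) (fold, first string) {
	nFold := int(e[0] & lengthMask)
	nFirst := int((e[1] >> lengthBits) & lengthMask)
	offset := 2 + nFold // header size + fold string
	return string(e[2 : 2+nFold]), string(e[offset : offset+nFirst])
}

func main() {
	// Hand-built entry: fold "k" (length 1), first mapping "K" (length 1).
	e := []byte{0x01, 0x01 << lengthBits, 'k', 'K'}
	fold, first := decode(e)
	fmt.Println(fold, first) // k K
}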
34 vendor/golang.org/x/text/cases/fold.go generated vendored
@@ -1,34 +0,0 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package cases

import "golang.org/x/text/transform"

type caseFolder struct{ transform.NopResetter }

// caseFolder implements the Transformer interface for doing case folding.
func (t *caseFolder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
	c := context{dst: dst, src: src, atEOF: atEOF}
	for c.next() {
		foldFull(&c)
		c.checkpoint()
	}
	return c.ret()
}

func (t *caseFolder) Span(src []byte, atEOF bool) (n int, err error) {
	c := context{src: src, atEOF: atEOF}
	for c.next() && isFoldFull(&c) {
		c.checkpoint()
	}
	return c.retSpan()
}

func makeFold(o options) transform.SpanningTransformer {
	// TODO: Special case folding, through option Language, Special/Turkic, or
	// both.
	// TODO: Implement Compact options.
	return &caseFolder{}
}
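For orientation: the caseFolder above is what backs the package's exported Fold caser. A typical use, assuming the usual golang.org/x/text/cases import:

package main

import (
	"fmt"

	"golang.org/x/text/cases"
)

func main() {
	fold := cases.Fold()
	// Full case folding maps ß to "ss" and all sigma forms to σ, so folded
	// strings can be compared caselessly.
	fmt.Println(fold.String("Straße"))                            // strasse
	fmt.Println(fold.String("ΣΙΣΥΦΟΣ") == fold.String("σισυφος")) // true
}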
61 vendor/golang.org/x/text/cases/icu.go generated vendored
@@ -1,61 +0,0 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build icu

package cases

// Ideally these functions would be defined in a test file, but go test doesn't
// allow CGO in tests. The build tag should ensure either way that these
// functions will not end up in the package.

// TODO: Ensure that the correct ICU version is set.

/*
#cgo LDFLAGS: -licui18n.57 -licuuc.57
#include <stdlib.h>
#include <unicode/ustring.h>
#include <unicode/utypes.h>
#include <unicode/localpointer.h>
#include <unicode/ucasemap.h>
*/
import "C"

import "unsafe"

func doICU(tag, caser, input string) string {
	err := C.UErrorCode(0)
	loc := C.CString(tag)
	cm := C.ucasemap_open(loc, C.uint32_t(0), &err)

	buf := make([]byte, len(input)*4)
	dst := (*C.char)(unsafe.Pointer(&buf[0]))
	src := C.CString(input)

	cn := C.int32_t(0)

	switch caser {
	case "fold":
		cn = C.ucasemap_utf8FoldCase(cm,
			dst, C.int32_t(len(buf)),
			src, C.int32_t(len(input)),
			&err)
	case "lower":
		cn = C.ucasemap_utf8ToLower(cm,
			dst, C.int32_t(len(buf)),
			src, C.int32_t(len(input)),
			&err)
	case "upper":
		cn = C.ucasemap_utf8ToUpper(cm,
			dst, C.int32_t(len(buf)),
			src, C.int32_t(len(input)),
			&err)
	case "title":
		cn = C.ucasemap_utf8ToTitle(cm,
			dst, C.int32_t(len(buf)),
			src, C.int32_t(len(input)),
			&err)
	}
	return string(buf[:cn])
}
82 vendor/golang.org/x/text/cases/info.go generated vendored
@@ -1,82 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package cases

func (c info) cccVal() info {
	if c&exceptionBit != 0 {
		return info(exceptions[c>>exceptionShift]) & cccMask
	}
	return c & cccMask
}

func (c info) cccType() info {
	ccc := c.cccVal()
	if ccc <= cccZero {
		return cccZero
	}
	return ccc
}

// TODO: Implement full Unicode breaking algorithm:
// 1) Implement breaking in separate package.
// 2) Use the breaker here.
// 3) Compare table size and performance of using the more generic breaker.
//
// Note that we can extend the current algorithm to be much more accurate. This
// only makes sense, though, if the performance and/or space penalty of using
// the generic breaker is big. Extra data will only be needed for non-cased
// runes, which means there are sufficient bits left in the caseType.
// ICU prohibits breaking in such cases as well.

// For the purpose of title casing we use an approximation of the Unicode Word
// Breaking algorithm defined in Annex #29:
// http://www.unicode.org/reports/tr29/#Default_Grapheme_Cluster_Table.
//
// For our approximation, we group the Word Break types into the following
// categories, with associated rules:
//
// 1) Letter:
//    ALetter, Hebrew_Letter, Numeric, ExtendNumLet, Extend, Format_FE, ZWJ.
//    Rule: Never break between consecutive runes of this category.
//
// 2) Mid:
//    MidLetter, MidNumLet, Single_Quote.
//    (Cf. case-ignorable: MidLetter, MidNumLet, Single_Quote or cat is Mn,
//    Me, Cf, Lm or Sk).
//    Rule: Don't break between Letter and Mid, but break between two Mids.
//
// 3) Break:
//    Any other category: NewLine, MidNum, CR, LF, Double_Quote, Katakana, and
//    Other.
//    These categories should always result in a break between two cased letters.
//    Rule: Always break.
//
// Note 1: the Katakana and MidNum categories can, in esoteric cases, result in
// preventing a break between two cased letters. For now we will ignore this
// (e.g. [ALetter] [ExtendNumLet] [Katakana] [ExtendNumLet] [ALetter] and
// [ALetter] [Numeric] [MidNum] [Numeric] [ALetter].)
//
// Note 2: the rule for Mid is very approximate, but works in most cases. To
// improve, we could store the categories in the trie value and use a FA to
// manage breaks. See TODO comment above.
//
// Note 3: according to the spec, it is possible for the Extend category to
// introduce breaks between other categories grouped in Letter. However, this
// is undesirable for our purposes. ICU prevents breaks in such cases as well.

// isBreak returns whether this rune should introduce a break.
func (c info) isBreak() bool {
	return c.cccVal() == cccBreak
}

// isLetter returns whether the rune is of break type ALetter, Hebrew_Letter,
// Numeric, ExtendNumLet, or Extend.
func (c info) isLetter() bool {
	ccc := c.cccVal()
	if ccc == cccZero {
		return !c.isCaseIgnorable()
	}
	return ccc != cccBreak
}
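The Letter/Mid/Break approximation above is what makes word boundaries come out right in ordinary text. A small illustration through the package's public Title caser (apostrophes are Mid, spaces are Break):

package main

import (
	"fmt"

	"golang.org/x/text/cases"
	"golang.org/x/text/language"
)

func main() {
	t := cases.Title(language.Und)
	// The apostrophe is a Mid rune, so it does not start a new word and the
	// following s stays lowercase; the space is a Break, so "word" is retitled.
	fmt.Println(t.String("it's a word")) // It's A Word
}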
816 vendor/golang.org/x/text/cases/map.go generated vendored
@@ -1,816 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package cases

// This file contains the definitions of case mappings for all supported
// languages. The rules for the language-specific tailorings were taken and
// modified from the CLDR transform definitions in common/transforms.

import (
	"strings"
	"unicode"
	"unicode/utf8"

	"golang.org/x/text/internal"
	"golang.org/x/text/language"
	"golang.org/x/text/transform"
	"golang.org/x/text/unicode/norm"
)

// A mapFunc takes a context set to the current rune and writes the mapped
// version to the same context. It may advance the context to the next rune. It
// returns whether a checkpoint is possible: whether the pDst bytes written to
// dst so far won't need changing as we see more source bytes.
type mapFunc func(*context) bool

// A spanFunc takes a context set to the current rune and returns whether this
// rune would be altered when written to the output. It may advance the context
// to the next rune. It returns whether a checkpoint is possible.
type spanFunc func(*context) bool

// maxIgnorable defines the maximum number of ignorables to consider for
// lookahead operations.
const maxIgnorable = 30

// supported lists the language tags for which we have tailorings.
const supported = "und af az el lt nl tr"

func init() {
	tags := []language.Tag{}
	for _, s := range strings.Split(supported, " ") {
		tags = append(tags, language.MustParse(s))
	}
	matcher = internal.NewInheritanceMatcher(tags)
	Supported = language.NewCoverage(tags)
}

var (
	matcher *internal.InheritanceMatcher

	Supported language.Coverage

	// We keep the following lists separate, instead of having a single per-
	// language struct, to give the compiler a chance to remove unused code.

	// Some uppercase mappers are stateless, so we can precompute the
	// Transformers and save a bit on runtime allocations.
	upperFunc = []struct {
		upper mapFunc
		span  spanFunc
	}{
		{nil, nil},                  // und
		{nil, nil},                  // af
		{aztrUpper(upper), isUpper}, // az
		{elUpper, noSpan},           // el
		{ltUpper(upper), noSpan},    // lt
		{nil, nil},                  // nl
		{aztrUpper(upper), isUpper}, // tr
	}

	undUpper            transform.SpanningTransformer = &undUpperCaser{}
	undLower            transform.SpanningTransformer = &undLowerCaser{}
	undLowerIgnoreSigma transform.SpanningTransformer = &undLowerIgnoreSigmaCaser{}

	lowerFunc = []mapFunc{
		nil,       // und
		nil,       // af
		aztrLower, // az
		nil,       // el
		ltLower,   // lt
		nil,       // nl
		aztrLower, // tr
	}

	titleInfos = []struct {
		title     mapFunc
		lower     mapFunc
		titleSpan spanFunc
		rewrite   func(*context)
	}{
		{title, lower, isTitle, nil},                // und
		{title, lower, isTitle, afnlRewrite},        // af
		{aztrUpper(title), aztrLower, isTitle, nil}, // az
		{title, lower, isTitle, nil},                // el
		{ltUpper(title), ltLower, noSpan, nil},      // lt
		{nlTitle, lower, nlTitleSpan, afnlRewrite},  // nl
		{aztrUpper(title), aztrLower, isTitle, nil}, // tr
	}
)

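// Illustrative example (public API, not part of this file): the tables above
// are what the exported constructors select from via the inheritance matcher.
// The Turkish tailoring wired up through lowerFunc behaves as:
//
//	lower := cases.Lower(language.Turkish)
//	lower.String("DİYARBAKIR") // "diyarbakır": İ→i, dotless I→ı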
func makeUpper(t language.Tag, o options) transform.SpanningTransformer {
	_, i, _ := matcher.Match(t)
	f := upperFunc[i].upper
	if f == nil {
		return undUpper
	}
	return &simpleCaser{f: f, span: upperFunc[i].span}
}

func makeLower(t language.Tag, o options) transform.SpanningTransformer {
	_, i, _ := matcher.Match(t)
	f := lowerFunc[i]
	if f == nil {
		if o.ignoreFinalSigma {
			return undLowerIgnoreSigma
		}
		return undLower
	}
	if o.ignoreFinalSigma {
		return &simpleCaser{f: f, span: isLower}
	}
	return &lowerCaser{
		first:   f,
		midWord: finalSigma(f),
	}
}

func makeTitle(t language.Tag, o options) transform.SpanningTransformer {
	_, i, _ := matcher.Match(t)
	x := &titleInfos[i]
	lower := x.lower
	if o.noLower {
		lower = (*context).copy
	} else if !o.ignoreFinalSigma {
		lower = finalSigma(lower)
	}
	return &titleCaser{
		title:     x.title,
		lower:     lower,
		titleSpan: x.titleSpan,
		rewrite:   x.rewrite,
	}
}

func noSpan(c *context) bool {
	c.err = transform.ErrEndOfSpan
	return false
}

// TODO: consider a similar special case for the fast majority lower case. This
// is a bit more involved so will require some more precise benchmarking to
// justify it.

type undUpperCaser struct{ transform.NopResetter }

// undUpperCaser implements the Transformer interface for doing an upper case
// mapping for the root locale (und). It eliminates the need for an allocation
// as it prevents escaping by not using function pointers.
func (t undUpperCaser) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
	c := context{dst: dst, src: src, atEOF: atEOF}
	for c.next() {
		upper(&c)
		c.checkpoint()
	}
	return c.ret()
}

func (t undUpperCaser) Span(src []byte, atEOF bool) (n int, err error) {
	c := context{src: src, atEOF: atEOF}
	for c.next() && isUpper(&c) {
		c.checkpoint()
	}
	return c.retSpan()
}

// undLowerIgnoreSigmaCaser implements the Transformer interface for doing
// a lower case mapping for the root locale (und) ignoring final sigma
// handling. This casing algorithm is used in some performance-critical packages
// like secure/precis and x/net/http/idna, which warrants its special-casing.
type undLowerIgnoreSigmaCaser struct{ transform.NopResetter }

func (t undLowerIgnoreSigmaCaser) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
	c := context{dst: dst, src: src, atEOF: atEOF}
	for c.next() && lower(&c) {
		c.checkpoint()
	}
	return c.ret()
}

// Span implements a generic lower-casing. This is possible as isLower works
// for all lowercasing variants. All lowercase variants only vary in how they
// transform a non-lowercase letter. They will never change an already lowercase
// letter. In addition, there is no state.
func (t undLowerIgnoreSigmaCaser) Span(src []byte, atEOF bool) (n int, err error) {
	c := context{src: src, atEOF: atEOF}
	for c.next() && isLower(&c) {
		c.checkpoint()
	}
	return c.retSpan()
}

type simpleCaser struct {
	context
	f    mapFunc
	span spanFunc
}

// simpleCaser implements the Transformer interface for doing a case operation
// on a rune-by-rune basis.
func (t *simpleCaser) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
	c := context{dst: dst, src: src, atEOF: atEOF}
	for c.next() && t.f(&c) {
		c.checkpoint()
	}
	return c.ret()
}

func (t *simpleCaser) Span(src []byte, atEOF bool) (n int, err error) {
	c := context{src: src, atEOF: atEOF}
	for c.next() && t.span(&c) {
		c.checkpoint()
	}
	return c.retSpan()
}

// undLowerCaser implements the Transformer interface for doing a lower case
// mapping for the root locale (und), including final sigma handling.
type undLowerCaser struct{ transform.NopResetter }

func (t undLowerCaser) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
	c := context{dst: dst, src: src, atEOF: atEOF}

	for isInterWord := true; c.next(); {
		if isInterWord {
			if c.info.isCased() {
				if !lower(&c) {
					break
				}
				isInterWord = false
			} else if !c.copy() {
				break
			}
		} else {
			if c.info.isNotCasedAndNotCaseIgnorable() {
				if !c.copy() {
					break
				}
				isInterWord = true
			} else if !c.hasPrefix("Σ") {
				if !lower(&c) {
					break
				}
			} else if !finalSigmaBody(&c) {
				break
			}
		}
		c.checkpoint()
	}
	return c.ret()
}

func (t undLowerCaser) Span(src []byte, atEOF bool) (n int, err error) {
	c := context{src: src, atEOF: atEOF}
	for c.next() && isLower(&c) {
		c.checkpoint()
	}
	return c.retSpan()
}

// lowerCaser implements the Transformer interface. The default Unicode lower
// casing requires different treatment for the first and subsequent characters
// of a word, most notably to handle the Greek final Sigma.
type lowerCaser struct {
	undLowerIgnoreSigmaCaser

	context

	first, midWord mapFunc
}

func (t *lowerCaser) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
	t.context = context{dst: dst, src: src, atEOF: atEOF}
	c := &t.context

	for isInterWord := true; c.next(); {
		if isInterWord {
			if c.info.isCased() {
				if !t.first(c) {
					break
				}
				isInterWord = false
			} else if !c.copy() {
				break
			}
		} else {
			if c.info.isNotCasedAndNotCaseIgnorable() {
				if !c.copy() {
					break
				}
				isInterWord = true
			} else if !t.midWord(c) {
				break
			}
		}
		c.checkpoint()
	}
	return c.ret()
}

// titleCaser implements the Transformer interface. Title casing algorithms
// distinguish between the first letter of a word and subsequent letters of the
// same word. It uses state to avoid requiring a potentially infinite lookahead.
type titleCaser struct {
	context

	// rune mappings used by the actual casing algorithms.
	title     mapFunc
	lower     mapFunc
	titleSpan spanFunc

	rewrite func(*context)
}

// Transform implements the standard Unicode title case algorithm as defined in
// Chapter 3 of The Unicode Standard:
// toTitlecase(X): Find the word boundaries in X according to Unicode Standard
// Annex #29, "Unicode Text Segmentation." For each word boundary, find the
// first cased character F following the word boundary. If F exists, map F to
// Titlecase_Mapping(F); then map all characters C between F and the following
// word boundary to Lowercase_Mapping(C).
func (t *titleCaser) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
	t.context = context{dst: dst, src: src, atEOF: atEOF, isMidWord: t.isMidWord}
	c := &t.context

	if !c.next() {
		return c.ret()
	}

	for {
		p := c.info
		if t.rewrite != nil {
			t.rewrite(c)
		}

		wasMid := p.isMid()
		// Break out of this loop on failure to ensure we do not modify the
		// state incorrectly.
		if p.isCased() {
			if !c.isMidWord {
				if !t.title(c) {
					break
				}
				c.isMidWord = true
			} else if !t.lower(c) {
				break
			}
		} else if !c.copy() {
			break
		} else if p.isBreak() {
			c.isMidWord = false
		}

		// As we save the state of the transformer, it is safe to call
		// checkpoint after any successful write.
		if !(c.isMidWord && wasMid) {
			c.checkpoint()
		}

		if !c.next() {
			break
		}
		if wasMid && c.info.isMid() {
			c.isMidWord = false
		}
	}
	return c.ret()
}

func (t *titleCaser) Span(src []byte, atEOF bool) (n int, err error) {
	t.context = context{src: src, atEOF: atEOF, isMidWord: t.isMidWord}
	c := &t.context

	if !c.next() {
		return c.retSpan()
	}

	for {
		p := c.info
		if t.rewrite != nil {
			t.rewrite(c)
		}

		wasMid := p.isMid()
		// Break out of this loop on failure to ensure we do not modify the
		// state incorrectly.
		if p.isCased() {
			if !c.isMidWord {
				if !t.titleSpan(c) {
					break
				}
				c.isMidWord = true
			} else if !isLower(c) {
				break
			}
		} else if p.isBreak() {
			c.isMidWord = false
		}
		// As we save the state of the transformer, it is safe to call
		// checkpoint after any successful write.
		if !(c.isMidWord && wasMid) {
			c.checkpoint()
		}

		if !c.next() {
			break
		}
		if wasMid && c.info.isMid() {
			c.isMidWord = false
		}
	}
	return c.retSpan()
}

// finalSigma adds Greek final Sigma handling to another casing function. It
// determines whether a lowercased sigma should be σ or ς, by looking ahead for
// case-ignorables and a cased letter.
func finalSigma(f mapFunc) mapFunc {
	return func(c *context) bool {
		if !c.hasPrefix("Σ") {
			return f(c)
		}
		return finalSigmaBody(c)
	}
}

func finalSigmaBody(c *context) bool {
	// Current rune must be Σ.

	// ::NFD();
	// # 03A3; 03C2; 03A3; 03A3; Final_Sigma; # GREEK CAPITAL LETTER SIGMA
	// Σ } [:case-ignorable:]* [:cased:] → σ;
	// [:cased:] [:case-ignorable:]* { Σ → ς;
	// ::Any-Lower;
	// ::NFC();

	p := c.pDst
	c.writeString("ς")

	// TODO: we should do this here, but right now this will never have an
	// effect as this is called when the prefix is Sigma, whereas Dutch and
	// Afrikaans only test for an apostrophe.
	//
	// if t.rewrite != nil {
	// 	t.rewrite(c)
	// }

	// We need to do one more iteration after maxIgnorable, as a cased
	// letter is not an ignorable and may modify the result.
	wasMid := false
	for i := 0; i < maxIgnorable+1; i++ {
		if !c.next() {
			return false
		}
		if !c.info.isCaseIgnorable() {
			// All Midword runes are also case ignorable, so we are
			// guaranteed to have a letter or word break here. As we are
			// unreading the rune, there is no need to unset c.isMidWord;
			// the title caser will handle this.
			if c.info.isCased() {
				// p+1 is guaranteed to be in bounds: if writing ς was
				// successful, p+1 will contain the second byte of ς. If not,
				// this function will have returned after c.next returned false.
				c.dst[p+1]++ // ς → σ
			}
			c.unreadRune()
			return true
		}
		// A case ignorable may also introduce a word break, so we may need
		// to continue searching even after detecting a break.
		isMid := c.info.isMid()
		if (wasMid && isMid) || c.info.isBreak() {
			c.isMidWord = false
		}
		wasMid = isMid
		c.copy()
	}
	return true
}
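// Illustrative example (public API): through the und lower caser, a capital
// sigma lowercases to ς only when no cased letter follows within the
// ignorable lookahead, otherwise to σ:
//
//	lower := cases.Lower(language.Und)
//	lower.String("ΣΣ") // "σς": the first Σ precedes a cased letter, the second is final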

// finalSigmaSpan would be the same as isLower.

// elUpper implements Greek upper casing, which entails removing a predefined
// set of non-blocked modifiers. Note that these accents should not be removed
// for title casing!
// Example: "Οδός" -> "ΟΔΟΣ".
func elUpper(c *context) bool {
	// From CLDR:
	// [:Greek:] [^[:ccc=Not_Reordered:][:ccc=Above:]]*? { [\u0313\u0314\u0301\u0300\u0306\u0342\u0308\u0304] → ;
	// [:Greek:] [^[:ccc=Not_Reordered:][:ccc=Iota_Subscript:]]*? { \u0345 → ;

	r, _ := utf8.DecodeRune(c.src[c.pSrc:])
	oldPDst := c.pDst
	if !upper(c) {
		return false
	}
	if !unicode.Is(unicode.Greek, r) {
		return true
	}
	i := 0
	// Take the properties of the uppercased rune that is already written to the
	// destination. This saves us the trouble of having to uppercase the
	// decomposed rune again.
	if b := norm.NFD.Properties(c.dst[oldPDst:]).Decomposition(); b != nil {
		// Restore the destination position and process the decomposed rune.
		r, sz := utf8.DecodeRune(b)
		if r <= 0xFF { // See A.6.1
			return true
		}
		c.pDst = oldPDst
		// Insert the first rune and ignore the modifiers. See A.6.2.
		c.writeBytes(b[:sz])
		i = len(b[sz:]) / 2 // Greek modifiers are always of length 2.
	}

	for ; i < maxIgnorable && c.next(); i++ {
		switch r, _ := utf8.DecodeRune(c.src[c.pSrc:]); r {
		// Above and Iota Subscript
		case 0x0300, // U+0300 COMBINING GRAVE ACCENT
			0x0301, // U+0301 COMBINING ACUTE ACCENT
			0x0304, // U+0304 COMBINING MACRON
			0x0306, // U+0306 COMBINING BREVE
			0x0308, // U+0308 COMBINING DIAERESIS
			0x0313, // U+0313 COMBINING COMMA ABOVE
			0x0314, // U+0314 COMBINING REVERSED COMMA ABOVE
			0x0342, // U+0342 COMBINING GREEK PERISPOMENI
			0x0345: // U+0345 COMBINING GREEK YPOGEGRAMMENI
			// No-op. Gobble the modifier.

		default:
			switch v, _ := trie.lookup(c.src[c.pSrc:]); info(v).cccType() {
			case cccZero:
				c.unreadRune()
				return true

			// We don't need to test for IotaSubscript as the only rune that
			// qualifies (U+0345) was already excluded in the switch statement
			// above. See A.4.

			case cccAbove:
				return c.copy()
			default:
				// Some other modifier. We're still allowed to gobble Greek
				// modifiers after this.
				c.copy()
			}
		}
	}
	return i == maxIgnorable
}

// TODO: implement elUpperSpan (low-priority: complex and infrequent).

func ltLower(c *context) bool {
	// From CLDR:
	// # Introduce an explicit dot above when lowercasing capital I's and J's
	// # whenever there are more accents above.
	// # (of the accents used in Lithuanian: grave, acute, tilde above, and ogonek)
	// # 0049; 0069 0307; 0049; 0049; lt More_Above; # LATIN CAPITAL LETTER I
	// # 004A; 006A 0307; 004A; 004A; lt More_Above; # LATIN CAPITAL LETTER J
	// # 012E; 012F 0307; 012E; 012E; lt More_Above; # LATIN CAPITAL LETTER I WITH OGONEK
	// # 00CC; 0069 0307 0300; 00CC; 00CC; lt; # LATIN CAPITAL LETTER I WITH GRAVE
	// # 00CD; 0069 0307 0301; 00CD; 00CD; lt; # LATIN CAPITAL LETTER I WITH ACUTE
	// # 0128; 0069 0307 0303; 0128; 0128; lt; # LATIN CAPITAL LETTER I WITH TILDE
	// ::NFD();
	// I } [^[:ccc=Not_Reordered:][:ccc=Above:]]* [:ccc=Above:] → i \u0307;
	// J } [^[:ccc=Not_Reordered:][:ccc=Above:]]* [:ccc=Above:] → j \u0307;
	// I \u0328 (Į) } [^[:ccc=Not_Reordered:][:ccc=Above:]]* [:ccc=Above:] → i \u0328 \u0307;
	// I \u0300 (Ì) → i \u0307 \u0300;
	// I \u0301 (Í) → i \u0307 \u0301;
	// I \u0303 (Ĩ) → i \u0307 \u0303;
	// ::Any-Lower();
	// ::NFC();

	i := 0
	if r := c.src[c.pSrc]; r < utf8.RuneSelf {
		lower(c)
		if r != 'I' && r != 'J' {
			return true
		}
	} else {
		p := norm.NFD.Properties(c.src[c.pSrc:])
		if d := p.Decomposition(); len(d) >= 3 && (d[0] == 'I' || d[0] == 'J') {
			// UTF-8 optimization: the decomposition will only have an above
			// modifier if the last rune of the decomposition is in [U+300-U+311].
			// In all other cases, a decomposition starting with I is always
			// an I followed by modifiers that are not cased themselves. See A.2.
			if d[1] == 0xCC && d[2] <= 0x91 { // A.2.4.
				if !c.writeBytes(d[:1]) {
					return false
				}
				c.dst[c.pDst-1] += 'a' - 'A' // lower

				// Assumption: modifier never changes on lowercase. See A.1.
				// Assumption: all modifiers added have CCC = Above. See A.2.3.
				return c.writeString("\u0307") && c.writeBytes(d[1:])
			}
			// In all other cases the additional modifiers will have a CCC
			// that is less than 230 (Above). We will insert the U+0307, if
			// needed, after these modifiers so that a string in FCD form
			// will remain so. See A.2.2.
			lower(c)
			i = 1
		} else {
			return lower(c)
		}
	}

	for ; i < maxIgnorable && c.next(); i++ {
		switch c.info.cccType() {
		case cccZero:
			c.unreadRune()
			return true
		case cccAbove:
			return c.writeString("\u0307") && c.copy() // See A.1.
		default:
			c.copy() // See A.1.
		}
	}
	return i == maxIgnorable
}

// ltLowerSpan would be the same as isLower.

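// Illustrative example (public API): ltLower's explicit dot above, per the
// CLDR rules quoted above (the result contains combining marks):
//
//	lower := cases.Lower(language.Lithuanian)
//	fmt.Printf("%+q", lower.String("\u00cc")) // "i\u0307\u0300"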
func ltUpper(f mapFunc) mapFunc {
	return func(c *context) bool {
		// Unicode:
		// 0307; 0307; ; ; lt After_Soft_Dotted; # COMBINING DOT ABOVE
		//
		// From CLDR:
		// # Remove \u0307 following soft-dotteds (i, j, and the like), with possible
		// # intervening non-230 marks.
		// ::NFD();
		// [:Soft_Dotted:] [^[:ccc=Not_Reordered:][:ccc=Above:]]* { \u0307 → ;
		// ::Any-Upper();
		// ::NFC();

		// TODO: See A.5. A soft-dotted rune never has an exception. This would
		// allow us to overload the exception bit and encode this property in
		// info. Need to measure performance impact of this.
		r, _ := utf8.DecodeRune(c.src[c.pSrc:])
		oldPDst := c.pDst
		if !f(c) {
			return false
		}
		if !unicode.Is(unicode.Soft_Dotted, r) {
			return true
		}

		// We don't need to do an NFD normalization, as a soft-dotted rune never
		// contains U+0307. See A.3.

		i := 0
		for ; i < maxIgnorable && c.next(); i++ {
			switch c.info.cccType() {
			case cccZero:
				c.unreadRune()
				return true
			case cccAbove:
				if c.hasPrefix("\u0307") {
					// We don't do a full NFC, but rather combine runes for
					// some of the common cases. (Returning NFC or
					// preserving normal form is neither a requirement nor
					// a possibility anyway).
					if !c.next() {
						return false
					}
					if c.dst[oldPDst] == 'I' && c.pDst == oldPDst+1 && c.src[c.pSrc] == 0xcc {
						s := ""
						switch c.src[c.pSrc+1] {
						case 0x80: // U+0300 COMBINING GRAVE ACCENT
							s = "\u00cc" // U+00CC LATIN CAPITAL LETTER I WITH GRAVE
						case 0x81: // U+0301 COMBINING ACUTE ACCENT
							s = "\u00cd" // U+00CD LATIN CAPITAL LETTER I WITH ACUTE
						case 0x83: // U+0303 COMBINING TILDE
							s = "\u0128" // U+0128 LATIN CAPITAL LETTER I WITH TILDE
						case 0x88: // U+0308 COMBINING DIAERESIS
							s = "\u00cf" // U+00CF LATIN CAPITAL LETTER I WITH DIAERESIS
						default:
						}
						if s != "" {
							c.pDst = oldPDst
							return c.writeString(s)
						}
					}
				}
				return c.copy()
			default:
				c.copy()
			}
		}
		return i == maxIgnorable
	}
}

// TODO: implement ltUpperSpan (low priority: complex and infrequent).

func aztrUpper(f mapFunc) mapFunc {
	return func(c *context) bool {
		// i→İ;
		if c.src[c.pSrc] == 'i' {
			return c.writeString("İ")
		}
		return f(c)
	}
}

func aztrLower(c *context) (done bool) {
	// From CLDR:
	// # I and i-dotless; I-dot and i are case pairs in Turkish and Azeri
	// # 0130; 0069; 0130; 0130; tr; # LATIN CAPITAL LETTER I WITH DOT ABOVE
	// İ→i;
	// # When lowercasing, remove dot_above in the sequence I + dot_above, which will turn into i.
	// # This matches the behavior of the canonically equivalent I-dot_above
	// # 0307; ; 0307; 0307; tr After_I; # COMBINING DOT ABOVE
	// # When lowercasing, unless an I is before a dot_above, it turns into a dotless i.
	// # 0049; 0131; 0049; 0049; tr Not_Before_Dot; # LATIN CAPITAL LETTER I
	// I([^[:ccc=Not_Reordered:][:ccc=Above:]]*)\u0307 → i$1 ;
	// I→ı ;
	// ::Any-Lower();
	if c.hasPrefix("\u0130") { // İ
		return c.writeString("i")
	}
	if c.src[c.pSrc] != 'I' {
		return lower(c)
	}

	// We ignore the lower-case I for now, but insert it later when we know
	// which form we need.
	start := c.pSrc + c.sz

	i := 0
Loop:
	// We check for up to n ignorables before \u0307. As \u0307 is an
	// ignorable as well, n is maxIgnorable-1.
	for ; i < maxIgnorable && c.next(); i++ {
		switch c.info.cccType() {
		case cccAbove:
			if c.hasPrefix("\u0307") {
				return c.writeString("i") && c.writeBytes(c.src[start:c.pSrc]) // ignore U+0307
			}
			done = true
			break Loop
		case cccZero:
			c.unreadRune()
			done = true
			break Loop
		default:
			// We'll write this rune after we know which starter to use.
		}
	}
	if i == maxIgnorable {
		done = true
	}
	return c.writeString("ı") && c.writeBytes(c.src[start:c.pSrc+c.sz]) && done
}

// aztrLowerSpan would be the same as isLower.

func nlTitle(c *context) bool {
	// From CLDR:
	// # Special titlecasing for Dutch initial "ij".
	// ::Any-Title();
	// # Fix up Ij at the beginning of a "word" (per Any-Title, not UAX #29)
	// [:^WB=ALetter:] [:WB=Extend:]* [[:WB=MidLetter:][:WB=MidNumLet:]]? { Ij } → IJ ;
	if c.src[c.pSrc] != 'I' && c.src[c.pSrc] != 'i' {
		return title(c)
	}

	if !c.writeString("I") || !c.next() {
		return false
	}
	if c.src[c.pSrc] == 'j' || c.src[c.pSrc] == 'J' {
		return c.writeString("J")
	}
	c.unreadRune()
	return true
}

func nlTitleSpan(c *context) bool {
	// From CLDR:
	// # Special titlecasing for Dutch initial "ij".
	// ::Any-Title();
	// # Fix up Ij at the beginning of a "word" (per Any-Title, not UAX #29)
	// [:^WB=ALetter:] [:WB=Extend:]* [[:WB=MidLetter:][:WB=MidNumLet:]]? { Ij } → IJ ;
	if c.src[c.pSrc] != 'I' {
		return isTitle(c)
	}
	if !c.next() || c.src[c.pSrc] == 'j' {
		return false
	}
	if c.src[c.pSrc] != 'J' {
		c.unreadRune()
	}
	return true
}

// Not part of CLDR, but see http://unicode.org/cldr/trac/ticket/7078.
func afnlRewrite(c *context) {
	if c.hasPrefix("'") || c.hasPrefix("’") {
		c.isMidWord = true
	}
}
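And nlTitle's special case, again through the public API; a minimal illustrative program:

package main

import (
	"fmt"

	"golang.org/x/text/cases"
	"golang.org/x/text/language"
)

func main() {
	title := cases.Title(language.Dutch)
	// Dutch treats initial "ij" as a digraph, so both letters are titled.
	fmt.Println(title.String("ijsland")) // IJsland
}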
2211 vendor/golang.org/x/text/cases/tables.go generated vendored
File diff suppressed because it is too large
215 vendor/golang.org/x/text/cases/trieval.go generated vendored
@@ -1,215 +0,0 @@
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.

package cases

// This file contains definitions for interpreting the trie value of the case
// trie generated by "go run gen*.go". It is shared by both the generator
// program and the resultant package. Sharing is achieved by the generator
// copying gen_trieval.go to trieval.go and changing what's above this comment.

// info holds case information for a single rune. It is the value returned
// by a trie lookup. Most mapping information can be stored in a single 16-bit
// value. If not, for example when a rune is mapped to multiple runes, the value
// stores some basic case data and an index into an array with additional data.
//
// The per-rune values have the following format:
//
//   if (exception) {
//     15..5  unsigned exception index
//     4      unused
//   } else {
//     15..8  XOR pattern or index to XOR pattern for case mapping
//            Only 13..8 are used for XOR patterns.
//     7      inverseFold (fold to upper, not to lower)
//     6      index: interpret the XOR pattern as an index
//            or isMid if case mode is cIgnorableUncased.
//     5..4   CCC: zero (normal or break), above or other
//   }
//   3  exception: interpret this value as an exception index
//      (TODO: is this bit necessary? Probably implied from case mode.)
//   2..0  case mode
//
// For the non-exceptional cases, a rune must be either uncased, lowercase or
// uppercase. If the rune is cased, the XOR pattern maps either a lowercase
// rune to uppercase or an uppercase rune to lowercase (applied to the 10
// least-significant bits of the rune).
//
// See the definitions below for a more detailed description of the various
// bits.
type info uint16

const (
	casedMask      = 0x0003
	fullCasedMask  = 0x0007
	ignorableMask  = 0x0006
	ignorableValue = 0x0004

	inverseFoldBit = 1 << 7
	isMidBit       = 1 << 6

	exceptionBit     = 1 << 3
	exceptionShift   = 5
	numExceptionBits = 11

	xorIndexBit = 1 << 6
	xorShift    = 8

	// There is no mapping if all xor bits and the exception bit are zero.
	hasMappingMask = 0xff80 | exceptionBit
)

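// A minimal sketch of reading these bit fields, assuming only the layout
// documented above (the real package uses the methods and constants defined
// in this file):
//
//	func caseMode(v uint16) uint16   { return v & 0x7 }       // bits 2..0
//	func hasException(v uint16) bool { return v&(1<<3) != 0 } // bit 3
//	func excIndex(v uint16) uint16   { return v >> 5 }        // bits 15..5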
// The case mode bits encode the case type of a rune. This includes uncased,
// title, upper and lower case and case ignorable. (For a definition of these
// terms see Chapter 3 of The Unicode Standard Core Specification.) In some rare
// cases, a rune can be both cased and case-ignorable. This is encoded by
// cIgnorableCased. A rune of this type is always lower case. Some runes are
// cased while not having a mapping.
//
// A common pattern for scripts in the Unicode standard is for upper and lower
// case runes to alternate for increasing rune values (e.g. the accented Latin
// ranges starting from U+0100 and U+1E00 among others and some Cyrillic
// characters). We use this property by defining a cXORCase mode, where the case
// mode (always upper or lower case) is derived from the rune value. As the XOR
// pattern for case mappings is often identical for successive runes, using
// cXORCase can result in large series of identical trie values. This, in turn,
// allows us to better compress the trie blocks.
const (
	cUncased          info = iota // 000
	cTitle                        // 001
	cLower                        // 010
	cUpper                        // 011
	cIgnorableUncased             // 100
	cIgnorableCased               // 101 // lower case if mappings exist
	cXORCase                      // 11x // case is cLower | ((rune&1) ^ x)

	maxCaseMode = cUpper
)

func (c info) isCased() bool {
	return c&casedMask != 0
}

func (c info) isCaseIgnorable() bool {
	return c&ignorableMask == ignorableValue
}

func (c info) isNotCasedAndNotCaseIgnorable() bool {
	return c&fullCasedMask == 0
}

func (c info) isCaseIgnorableAndNotCased() bool {
	return c&fullCasedMask == cIgnorableUncased
}

func (c info) isMid() bool {
	return c&(fullCasedMask|isMidBit) == isMidBit|cIgnorableUncased
}

// The case mapping implementation will need to know about various Canonical
// Combining Class (CCC) values. We encode two of these in the trie value:
// cccZero (0) and cccAbove (230). If the value is cccOther, it means that
// CCC(r) > 0, but not 230. A value of cccBreak means that CCC(r) == 0 and that
// the rune also has the break category Break (see below).
const (
	cccBreak info = iota << 4
	cccZero
	cccAbove
	cccOther

	cccMask = cccBreak | cccZero | cccAbove | cccOther
)

const (
	starter       = 0
	above         = 230
	iotaSubscript = 240
)

// The exceptions slice holds data that does not fit in a normal info entry.
// The entry is pointed to by the exception index in an entry. It has the
// following format:
//
// Header
// byte 0:
//  7..6  unused
//  5..4  CCC type (same bits as entry)
//  3     unused
//  2..0  length of fold
//
// byte 1:
//  7..6  unused
//  5..3  length of 1st mapping of case type
//  2..0  length of 2nd mapping of case type
//
//   case     1st    2nd
//   lower -> upper, title
//   upper -> lower, title
//   title -> lower, upper
//
// Lengths with the value 0x7 indicate no value and imply no change.
// A length of 0 indicates a mapping to a zero-length string.
//
// Body bytes:
//   case folding bytes
//   lowercase mapping bytes
//   uppercase mapping bytes
//   titlecase mapping bytes
//   closure mapping bytes (for NFKC_Casefold). (TODO)
//
// Fallbacks:
//   missing fold  -> lower
//   missing title -> upper
//   all missing   -> original rune
//
// exceptions starts with a dummy byte to enforce that there is no zero index
// value.
const (
	lengthMask = 0x07
	lengthBits = 3
	noChange   = 0
)

// References to generated trie.

var trie = newCaseTrie(0)

var sparse = sparseBlocks{
	values:  sparseValues[:],
	offsets: sparseOffsets[:],
}

// Sparse block lookup code.

// valueRange is an entry in a sparse block.
type valueRange struct {
	value  uint16
	lo, hi byte
}

type sparseBlocks struct {
	values  []valueRange
	offsets []uint16
}

// lookup returns the value from values block n for byte b using binary search.
func (s *sparseBlocks) lookup(n uint32, b byte) uint16 {
	lo := s.offsets[n]
	hi := s.offsets[n+1]
	for lo < hi {
		m := lo + (hi-lo)/2
		r := s.values[m]
		if r.lo <= b && b <= r.hi {
			return r.value
		}
		if b < r.lo {
			hi = m
		} else {
			lo = m + 1
		}
	}
	return 0
}

// lastRuneForTesting is the last rune used for testing. Everything after this
// is boring.
const lastRuneForTesting = rune(0x1FFFF)
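The binary search in sparseBlocks.lookup can be exercised with a hand-built block; a self-contained rendition that copies the types and method from the file above:

package main

import "fmt"

type valueRange struct {
	value  uint16
	lo, hi byte
}

type sparseBlocks struct {
	values  []valueRange
	offsets []uint16
}

// lookup returns the value from values block n for byte b using binary search.
func (s *sparseBlocks) lookup(n uint32, b byte) uint16 {
	lo, hi := s.offsets[n], s.offsets[n+1]
	for lo < hi {
		m := lo + (hi-lo)/2
		r := s.values[m]
		if r.lo <= b && b <= r.hi {
			return r.value
		}
		if b < r.lo {
			hi = m
		} else {
			lo = m + 1
		}
	}
	return 0
}

func main() {
	// One block (offsets[0]..offsets[1]) holding a single range 0x80..0x8f.
	s := sparseBlocks{
		values:  []valueRange{{value: 42, lo: 0x80, hi: 0x8f}},
		offsets: []uint16{0, 1},
	}
	fmt.Println(s.lookup(0, 0x85)) // 42
	fmt.Println(s.lookup(0, 0x90)) // 0 (not found)
}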
51 vendor/golang.org/x/text/internal/internal.go generated vendored
@@ -1,51 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:generate go run gen.go

// Package internal contains non-exported functionality that is used by
// packages in the text repository.
package internal // import "golang.org/x/text/internal"

import (
	"sort"

	"golang.org/x/text/language"
)

// SortTags sorts tags in place.
func SortTags(tags []language.Tag) {
	sort.Sort(sorter(tags))
}

type sorter []language.Tag

func (s sorter) Len() int {
	return len(s)
}

func (s sorter) Swap(i, j int) {
	s[i], s[j] = s[j], s[i]
}

func (s sorter) Less(i, j int) bool {
	return s[i].String() < s[j].String()
}

// UniqueTags sorts and filters duplicate tags in place and returns a slice with
// only unique tags.
func UniqueTags(tags []language.Tag) []language.Tag {
	if len(tags) <= 1 {
		return tags
	}
	SortTags(tags)
	k := 0
	for i := 1; i < len(tags); i++ {
		if tags[k].String() < tags[i].String() {
			k++
			tags[k] = tags[i]
		}
	}
	return tags[:k+1]
}
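UniqueTags in action; note that golang.org/x/text/internal is importable only from within the x/text repository, so this is purely illustrative:

package internal_test // would need to live inside the x/text repository

import (
	"fmt"

	"golang.org/x/text/internal"
	"golang.org/x/text/language"
)

func Example_uniqueTags() {
	tags := []language.Tag{
		language.MustParse("nl"),
		language.MustParse("en"),
		language.MustParse("en"),
	}
	// Sorted by string form and deduplicated in place.
	fmt.Println(internal.UniqueTags(tags))
	// Output: [en nl]
}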
67 vendor/golang.org/x/text/internal/match.go generated vendored
@@ -1,67 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package internal

// This file contains matchers that implement CLDR inheritance.
//
// See http://unicode.org/reports/tr35/#Locale_Inheritance.
//
// Some of the inheritance described in this document is already handled by
// the cldr package.

import (
	"golang.org/x/text/language"
)

// TODO: consider if (some of the) matching algorithm needs to be public after
// getting some feel about what is generic and what is specific.

// NewInheritanceMatcher returns a matcher that matches based on the inheritance
// chain.
//
// The matcher uses canonicalization and the parent relationship to find a
// match. The resulting match will always be either Und or a language with the
// same language and script as the requested language. It will not match
// languages for which there is understood to be mutual or one-directional
// intelligibility.
//
// A Match will indicate an Exact match if the language matches after
// canonicalization and High if the matched tag is a parent.
func NewInheritanceMatcher(t []language.Tag) *InheritanceMatcher {
	tags := &InheritanceMatcher{make(map[language.Tag]int)}
	for i, tag := range t {
		ct, err := language.All.Canonicalize(tag)
		if err != nil {
			ct = tag
		}
		tags.index[ct] = i
	}
	return tags
}

type InheritanceMatcher struct {
	index map[language.Tag]int
}

func (m InheritanceMatcher) Match(want ...language.Tag) (language.Tag, int, language.Confidence) {
	for _, t := range want {
		ct, err := language.All.Canonicalize(t)
		if err != nil {
			ct = t
		}
		conf := language.Exact
		for {
			if index, ok := m.index[ct]; ok {
				return ct, index, conf
			}
			if ct == language.Und {
				break
			}
			ct = ct.Parent()
			conf = language.High
		}
	}
	return language.Und, 0, language.No
}
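A sketch of the fallback behavior, building on the same imports as the previous example (again only compilable inside x/text): nl-BE is not in the set, so the matcher walks to its parent nl and reports High confidence.

m := internal.NewInheritanceMatcher([]language.Tag{
	language.MustParse("en"),
	language.MustParse("nl"),
})
tag, index, conf := m.Match(language.MustParse("nl-BE"))
fmt.Println(tag, index, conf) // nl 1 High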
116 vendor/golang.org/x/text/internal/tables.go generated vendored
@@ -1,116 +0,0 @@
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.

package internal

// Parent maps a compact index of a tag to the compact index of the parent of
// this tag.
var Parent = []uint16{ // 752 elements
    // Entry 0 - 3F
    0x0000, 0x0053, 0x00e5, 0x0000, 0x0003, 0x0003, 0x0000, 0x0006,
    0x0000, 0x0008, 0x0000, 0x000a, 0x0000, 0x000c, 0x000c, 0x000c,
    0x000c, 0x000c, 0x000c, 0x000c, 0x000c, 0x000c, 0x000c, 0x000c,
    0x000c, 0x000c, 0x000c, 0x000c, 0x000c, 0x000c, 0x000c, 0x000c,
    0x000c, 0x000c, 0x000c, 0x000c, 0x000c, 0x000c, 0x000c, 0x000c,
    0x000c, 0x0000, 0x0000, 0x002a, 0x0000, 0x002c, 0x0000, 0x002e,
    0x0000, 0x0000, 0x0031, 0x0030, 0x0030, 0x0000, 0x0035, 0x0000,
    0x0037, 0x0000, 0x0039, 0x0000, 0x003b, 0x0000, 0x003d, 0x0000,
    // Entry 40 - 7F
    0x0000, 0x0040, 0x0000, 0x0042, 0x0042, 0x0000, 0x0045, 0x0045,
    0x0000, 0x0048, 0x0000, 0x004a, 0x0000, 0x0000, 0x004d, 0x004c,
    0x004c, 0x0000, 0x0051, 0x0051, 0x0051, 0x0051, 0x0000, 0x0056,
    0x0000, 0x0058, 0x0000, 0x005a, 0x0000, 0x005c, 0x005c, 0x0000,
    0x005f, 0x0000, 0x0061, 0x0000, 0x0063, 0x0000, 0x0065, 0x0065,
    0x0000, 0x0068, 0x0000, 0x006a, 0x006a, 0x006a, 0x006a, 0x006a,
    0x006a, 0x006a, 0x0000, 0x0072, 0x0000, 0x0074, 0x0000, 0x0076,
    0x0000, 0x0000, 0x0079, 0x0000, 0x007b, 0x0000, 0x007d, 0x0000,
    // Entry 80 - BF
    0x007f, 0x007f, 0x0000, 0x0082, 0x0082, 0x0000, 0x0085, 0x0086,
    0x0086, 0x0086, 0x0085, 0x0087, 0x0086, 0x0086, 0x0086, 0x0085,
    0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0087, 0x0086,
    0x0086, 0x0086, 0x0086, 0x0087, 0x0086, 0x0087, 0x0086, 0x0086,
    0x0087, 0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0086,
    0x0086, 0x0086, 0x0085, 0x0086, 0x0086, 0x0086, 0x0086, 0x0086,
    0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0086,
    0x0086, 0x0086, 0x0086, 0x0086, 0x0085, 0x0086, 0x0085, 0x0086,
    // Entry C0 - FF
    0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0087,
    0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0085,
    0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0087, 0x0086, 0x0086,
    0x0087, 0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0086,
    0x0086, 0x0086, 0x0086, 0x0086, 0x0085, 0x0085, 0x0086, 0x0086,
    0x0085, 0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0000, 0x00ee,
    0x0000, 0x00f0, 0x00f1, 0x00f1, 0x00f1, 0x00f1, 0x00f1, 0x00f1,
    0x00f1, 0x00f1, 0x00f0, 0x00f1, 0x00f0, 0x00f0, 0x00f1, 0x00f1,
    // Entry 100 - 13F
    0x00f0, 0x00f1, 0x00f1, 0x00f1, 0x00f1, 0x00f0, 0x00f1, 0x00f1,
    0x00f1, 0x00f1, 0x00f1, 0x00f1, 0x0000, 0x010c, 0x0000, 0x010e,
    0x0000, 0x0110, 0x0000, 0x0112, 0x0112, 0x0000, 0x0115, 0x0115,
    0x0115, 0x0115, 0x0000, 0x011a, 0x0000, 0x011c, 0x0000, 0x011e,
    0x011e, 0x0000, 0x0121, 0x0121, 0x0121, 0x0121, 0x0121, 0x0121,
    0x0121, 0x0121, 0x0121, 0x0121, 0x0121, 0x0121, 0x0121, 0x0121,
    0x0121, 0x0121, 0x0121, 0x0121, 0x0121, 0x0121, 0x0121, 0x0121,
    0x0121, 0x0121, 0x0121, 0x0121, 0x0121, 0x0121, 0x0121, 0x0121,
    // Entry 140 - 17F
    0x0121, 0x0121, 0x0121, 0x0121, 0x0121, 0x0121, 0x0121, 0x0121,
    0x0121, 0x0121, 0x0121, 0x0121, 0x0121, 0x0121, 0x0121, 0x0121,
    0x0000, 0x0150, 0x0000, 0x0152, 0x0000, 0x0154, 0x0000, 0x0156,
    0x0000, 0x0158, 0x0000, 0x015a, 0x015a, 0x015a, 0x0000, 0x015e,
    0x0000, 0x0000, 0x0161, 0x0000, 0x0163, 0x0000, 0x0165, 0x0165,
    0x0165, 0x0000, 0x0169, 0x0000, 0x016b, 0x0000, 0x016d, 0x0000,
    0x016f, 0x016f, 0x0000, 0x0172, 0x0000, 0x0174, 0x0000, 0x0176,
    0x0000, 0x0178, 0x0000, 0x017a, 0x0000, 0x017c, 0x0000, 0x017e,
    // Entry 180 - 1BF
    0x0000, 0x0180, 0x0180, 0x0180, 0x0000, 0x0000, 0x0185, 0x0000,
    0x0000, 0x0188, 0x0000, 0x018a, 0x0000, 0x0000, 0x018d, 0x0000,
    0x018f, 0x0000, 0x0000, 0x0192, 0x0000, 0x0000, 0x0195, 0x0000,
    0x0197, 0x0000, 0x0199, 0x0000, 0x019b, 0x0000, 0x019d, 0x0000,
    0x019f, 0x0000, 0x01a1, 0x0000, 0x01a3, 0x0000, 0x01a5, 0x0000,
    0x01a7, 0x0000, 0x01a9, 0x01a9, 0x0000, 0x01ac, 0x0000, 0x01ae,
    0x0000, 0x01b0, 0x0000, 0x01b2, 0x0000, 0x01b4, 0x0000, 0x0000,
    0x01b7, 0x0000, 0x01b9, 0x0000, 0x01bb, 0x0000, 0x01bd, 0x0000,
    // Entry 1C0 - 1FF
    0x01bf, 0x0000, 0x01c1, 0x0000, 0x01c3, 0x01c3, 0x01c3, 0x01c3,
    0x0000, 0x01c8, 0x0000, 0x01ca, 0x01ca, 0x0000, 0x01cd, 0x0000,
    0x01cf, 0x0000, 0x01d1, 0x0000, 0x01d3, 0x0000, 0x01d5, 0x0000,
    0x01d7, 0x01d7, 0x0000, 0x01da, 0x0000, 0x01dc, 0x0000, 0x01de,
    0x0000, 0x01e0, 0x0000, 0x01e2, 0x0000, 0x01e4, 0x0000, 0x01e6,
    0x0000, 0x01e8, 0x0000, 0x01ea, 0x0000, 0x01ec, 0x01ec, 0x01ec,
    0x0000, 0x01f0, 0x0000, 0x01f2, 0x0000, 0x01f4, 0x0000, 0x01f6,
    0x0000, 0x0000, 0x01f9, 0x0000, 0x01fb, 0x01fb, 0x0000, 0x01fe,
    // Entry 200 - 23F
    0x0000, 0x0200, 0x0200, 0x0000, 0x0203, 0x0203, 0x0000, 0x0206,
    0x0206, 0x0206, 0x0206, 0x0206, 0x0206, 0x0206, 0x0000, 0x020e,
    0x0000, 0x0210, 0x0000, 0x0212, 0x0000, 0x0000, 0x0000, 0x0000,
    0x0000, 0x0218, 0x0000, 0x0000, 0x021b, 0x0000, 0x021d, 0x021d,
    0x0000, 0x0220, 0x0000, 0x0222, 0x0222, 0x0000, 0x0000, 0x0226,
    0x0225, 0x0225, 0x0000, 0x0000, 0x022b, 0x0000, 0x022d, 0x0000,
    0x022f, 0x0000, 0x023b, 0x0231, 0x023b, 0x023b, 0x023b, 0x023b,
    0x023b, 0x023b, 0x023b, 0x0231, 0x023b, 0x023b, 0x0000, 0x023e,
    // Entry 240 - 27F
    0x023e, 0x023e, 0x0000, 0x0242, 0x0000, 0x0244, 0x0000, 0x0246,
    0x0246, 0x0000, 0x0249, 0x0000, 0x024b, 0x024b, 0x024b, 0x024b,
    0x024b, 0x024b, 0x0000, 0x0252, 0x0000, 0x0254, 0x0000, 0x0256,
    0x0000, 0x0258, 0x0000, 0x025a, 0x0000, 0x0000, 0x025d, 0x025d,
    0x025d, 0x0000, 0x0261, 0x0000, 0x0263, 0x0000, 0x0265, 0x0000,
    0x0000, 0x0268, 0x0267, 0x0267, 0x0000, 0x026c, 0x0000, 0x026e,
    0x0000, 0x0270, 0x0000, 0x0000, 0x0000, 0x0000, 0x0275, 0x0000,
    0x0000, 0x0278, 0x0000, 0x027a, 0x027a, 0x027a, 0x027a, 0x0000,
    // Entry 280 - 2BF
    0x027f, 0x027f, 0x027f, 0x0000, 0x0283, 0x0283, 0x0283, 0x0283,
    0x0283, 0x0000, 0x0289, 0x0289, 0x0289, 0x0289, 0x0000, 0x0000,
    0x0000, 0x0000, 0x0291, 0x0291, 0x0291, 0x0000, 0x0295, 0x0295,
    0x0295, 0x0295, 0x0000, 0x0000, 0x029b, 0x029b, 0x029b, 0x029b,
    0x0000, 0x02a0, 0x0000, 0x02a2, 0x02a2, 0x0000, 0x02a5, 0x0000,
    0x02a7, 0x02a7, 0x0000, 0x0000, 0x02ab, 0x0000, 0x0000, 0x02ae,
    0x0000, 0x02b0, 0x02b0, 0x0000, 0x0000, 0x02b4, 0x0000, 0x02b6,
    0x0000, 0x02b8, 0x0000, 0x02ba, 0x0000, 0x02bc, 0x02bc, 0x0000,
    // Entry 2C0 - 2FF
    0x0000, 0x02c0, 0x0000, 0x02c2, 0x02bf, 0x02bf, 0x0000, 0x0000,
    0x02c7, 0x02c6, 0x02c6, 0x0000, 0x0000, 0x02cc, 0x0000, 0x02ce,
    0x0000, 0x02d0, 0x0000, 0x0000, 0x02d3, 0x0000, 0x0000, 0x0000,
    0x02d7, 0x0000, 0x02d9, 0x0000, 0x02db, 0x0000, 0x02dd, 0x02dd,
    0x0000, 0x02e0, 0x0000, 0x02e2, 0x0000, 0x02e4, 0x02e4, 0x02e4,
    0x02e4, 0x02e4, 0x0000, 0x02ea, 0x02eb, 0x02ea, 0x0000, 0x02ee,
} // Size: 1528 bytes

// Total table size 1528 bytes (1KiB); checksum: B99CF952
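For context, the Parent table above is consumed by walking from a compact tag index up to the root (index 0, "und"): each entry holds the compact index of its parent. A minimal self-contained sketch of that walk, using a hypothetical parentChain helper and a toy table rather than the vendored data:

    package main

    import "fmt"

    // parentChain is a hypothetical helper: given a Parent-style table and a
    // compact tag index, it returns the chain of indices up to the root (0).
    func parentChain(parent []uint16, idx uint16) []uint16 {
        chain := []uint16{idx}
        for idx != 0 {
            idx = parent[idx] // each entry points at the compact index of its parent
            chain = append(chain, idx)
        }
        return chain
    }

    func main() {
        // Toy table: 0 is the root; 1 and 2 are children of 0; 3 is a child of 2.
        parent := []uint16{0, 0, 0, 2}
        fmt.Println(parentChain(parent, 3)) // [3 2 0]
    }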
100
vendor/golang.org/x/text/internal/tag/tag.go
generated
vendored
@ -1,100 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package tag contains functionality handling tags and related data.
package tag // import "golang.org/x/text/internal/tag"

import "sort"

// An Index converts tags to a compact numeric value.
//
// All elements are of size 4. Tags may be up to 4 bytes long. Excess bytes can
// be used to store additional information about the tag.
type Index string

// Elem returns the element data at the given index.
func (s Index) Elem(x int) string {
    return string(s[x*4 : x*4+4])
}

// Index reports the index of the given key or -1 if it could not be found.
// Only the first len(key) bytes from the start of the 4-byte entries will be
// considered for the search and the first match in Index will be returned.
func (s Index) Index(key []byte) int {
    n := len(key)
    // search the index of the first entry with an equal or higher value than
    // key in s.
    index := sort.Search(len(s)/4, func(i int) bool {
        return cmp(s[i*4:i*4+n], key) != -1
    })
    i := index * 4
    if cmp(s[i:i+len(key)], key) != 0 {
        return -1
    }
    return index
}

// Next finds the next occurrence of key after index x, which must have been
// obtained from a call to Index using the same key. It returns x+1 or -1.
func (s Index) Next(key []byte, x int) int {
    if x++; x*4 < len(s) && cmp(s[x*4:x*4+len(key)], key) == 0 {
        return x
    }
    return -1
}

// cmp returns an integer comparing a and b lexicographically.
func cmp(a Index, b []byte) int {
    n := len(a)
    if len(b) < n {
        n = len(b)
    }
    for i, c := range b[:n] {
        switch {
        case a[i] > c:
            return 1
        case a[i] < c:
            return -1
        }
    }
    switch {
    case len(a) < len(b):
        return -1
    case len(a) > len(b):
        return 1
    }
    return 0
}

// Compare returns an integer comparing a and b lexicographically.
func Compare(a string, b []byte) int {
    return cmp(Index(a), b)
}

// FixCase reformats b to the same pattern of cases as form.
// If returns false if string b is malformed.
func FixCase(form string, b []byte) bool {
    if len(form) != len(b) {
        return false
    }
    for i, c := range b {
        if form[i] <= 'Z' {
            if c >= 'a' {
                c -= 'z' - 'Z'
            }
            if c < 'A' || 'Z' < c {
                return false
            }
        } else {
            if c <= 'Z' {
                c += 'z' - 'Z'
            }
            if c < 'a' || 'z' < c {
                return false
            }
        }
        b[i] = c
    }
    return true
}
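The Index type above packs sorted, fixed-width 4-byte entries into one string and binary-searches them with sort.Search. A small standalone sketch of the same idea (not the vendored code itself; the index string here is made up):

    package main

    import (
        "fmt"
        "sort"
    )

    // find locates key among fixed-width 4-byte entries packed into s,
    // mirroring the sort.Search pattern used by tag.Index above.
    func find(s, key string) int {
        n := len(key)
        i := sort.Search(len(s)/4, func(i int) bool {
            return s[i*4:i*4+n] >= key
        })
        if i*4+n <= len(s) && s[i*4:i*4+n] == key {
            return i
        }
        return -1
    }

    func main() {
        const idx = "af  amh ar  zh  " // 4 sorted entries, each 4 bytes wide
        fmt.Println(find(idx, "ar"))   // 2
        fmt.Println(find(idx, "xx"))   // -1
    }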
16
vendor/golang.org/x/text/language/common.go
generated
vendored
@ -1,16 +0,0 @@
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.

package language

// This file contains code common to the maketables.go and the package code.

// langAliasType is the type of an alias in langAliasMap.
type langAliasType int8

const (
    langDeprecated langAliasType = iota
    langMacro
    langLegacy

    langAliasTypeUnknown langAliasType = -1
)
197
vendor/golang.org/x/text/language/coverage.go
generated
vendored
@ -1,197 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package language

import (
    "fmt"
    "sort"
)

// The Coverage interface is used to define the level of coverage of an
// internationalization service. Note that not all types are supported by all
// services. As lists may be generated on the fly, it is recommended that users
// of a Coverage cache the results.
type Coverage interface {
    // Tags returns the list of supported tags.
    Tags() []Tag

    // BaseLanguages returns the list of supported base languages.
    BaseLanguages() []Base

    // Scripts returns the list of supported scripts.
    Scripts() []Script

    // Regions returns the list of supported regions.
    Regions() []Region
}

var (
    // Supported defines a Coverage that lists all supported subtags. Tags
    // always returns nil.
    Supported Coverage = allSubtags{}
)

// TODO:
// - Support Variants, numbering systems.
// - CLDR coverage levels.
// - Set of common tags defined in this package.

type allSubtags struct{}

// Regions returns the list of supported regions. As all regions are in a
// consecutive range, it simply returns a slice of numbers in increasing order.
// The "undefined" region is not returned.
func (s allSubtags) Regions() []Region {
    reg := make([]Region, numRegions)
    for i := range reg {
        reg[i] = Region{regionID(i + 1)}
    }
    return reg
}

// Scripts returns the list of supported scripts. As all scripts are in a
// consecutive range, it simply returns a slice of numbers in increasing order.
// The "undefined" script is not returned.
func (s allSubtags) Scripts() []Script {
    scr := make([]Script, numScripts)
    for i := range scr {
        scr[i] = Script{scriptID(i + 1)}
    }
    return scr
}

// BaseLanguages returns the list of all supported base languages. It generates
// the list by traversing the internal structures.
func (s allSubtags) BaseLanguages() []Base {
    base := make([]Base, 0, numLanguages)
    for i := 0; i < langNoIndexOffset; i++ {
        // We included "und" already for the value 0.
        if i != nonCanonicalUnd {
            base = append(base, Base{langID(i)})
        }
    }
    i := langNoIndexOffset
    for _, v := range langNoIndex {
        for k := 0; k < 8; k++ {
            if v&1 == 1 {
                base = append(base, Base{langID(i)})
            }
            v >>= 1
            i++
        }
    }
    return base
}

// Tags always returns nil.
func (s allSubtags) Tags() []Tag {
    return nil
}

// coverage is used used by NewCoverage which is used as a convenient way for
// creating Coverage implementations for partially defined data. Very often a
// package will only need to define a subset of slices. coverage provides a
// convenient way to do this. Moreover, packages using NewCoverage, instead of
// their own implementation, will not break if later new slice types are added.
type coverage struct {
    tags    func() []Tag
    bases   func() []Base
    scripts func() []Script
    regions func() []Region
}

func (s *coverage) Tags() []Tag {
    if s.tags == nil {
        return nil
    }
    return s.tags()
}

// bases implements sort.Interface and is used to sort base languages.
type bases []Base

func (b bases) Len() int {
    return len(b)
}

func (b bases) Swap(i, j int) {
    b[i], b[j] = b[j], b[i]
}

func (b bases) Less(i, j int) bool {
    return b[i].langID < b[j].langID
}

// BaseLanguages returns the result from calling s.bases if it is specified or
// otherwise derives the set of supported base languages from tags.
func (s *coverage) BaseLanguages() []Base {
    if s.bases == nil {
        tags := s.Tags()
        if len(tags) == 0 {
            return nil
        }
        a := make([]Base, len(tags))
        for i, t := range tags {
            a[i] = Base{langID(t.lang)}
        }
        sort.Sort(bases(a))
        k := 0
        for i := 1; i < len(a); i++ {
            if a[k] != a[i] {
                k++
                a[k] = a[i]
            }
        }
        return a[:k+1]
    }
    return s.bases()
}

func (s *coverage) Scripts() []Script {
    if s.scripts == nil {
        return nil
    }
    return s.scripts()
}

func (s *coverage) Regions() []Region {
    if s.regions == nil {
        return nil
    }
    return s.regions()
}

// NewCoverage returns a Coverage for the given lists. It is typically used by
// packages providing internationalization services to define their level of
// coverage. A list may be of type []T or func() []T, where T is either Tag,
// Base, Script or Region. The returned Coverage derives the value for Bases
// from Tags if no func or slice for []Base is specified. For other unspecified
// types the returned Coverage will return nil for the respective methods.
func NewCoverage(list ...interface{}) Coverage {
    s := &coverage{}
    for _, x := range list {
        switch v := x.(type) {
        case func() []Base:
            s.bases = v
        case func() []Script:
            s.scripts = v
        case func() []Region:
            s.regions = v
        case func() []Tag:
            s.tags = v
        case []Base:
            s.bases = func() []Base { return v }
        case []Script:
            s.scripts = func() []Script { return v }
        case []Region:
            s.regions = func() []Region { return v }
        case []Tag:
            s.tags = func() []Tag { return v }
        default:
            panic(fmt.Sprintf("language: unsupported set type %T", v))
        }
    }
    return s
}
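As the NewCoverage doc comment above describes, a caller may pass either slices or producer functions, and BaseLanguages is derived from Tags when no []Base is given. A sketch of how a hypothetical caller could use it (the tag list is illustrative, not from the vendored code):

    package main

    import (
        "fmt"

        "golang.org/x/text/language"
    )

    func main() {
        // Advertise coverage from a plain tag slice; per the doc comment above,
        // BaseLanguages is derived from Tags when no []Base is supplied.
        c := language.NewCoverage([]language.Tag{
            language.English,
            language.Danish,
            language.MustParse("en-AU"),
        })
        fmt.Println(c.Tags())          // the three tags, as given
        fmt.Println(c.BaseLanguages()) // derived from the tags, deduplicated and sorted
        fmt.Println(c.Scripts())       // [] (nil: no script list was provided)
    }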
38
vendor/golang.org/x/text/language/go1_1.go
generated
vendored
@ -1,38 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !go1.2

package language

import "sort"

func sortStable(s sort.Interface) {
    ss := stableSort{
        s:   s,
        pos: make([]int, s.Len()),
    }
    for i := range ss.pos {
        ss.pos[i] = i
    }
    sort.Sort(&ss)
}

type stableSort struct {
    s   sort.Interface
    pos []int
}

func (s *stableSort) Len() int {
    return len(s.pos)
}

func (s *stableSort) Less(i, j int) bool {
    return s.s.Less(i, j) || !s.s.Less(j, i) && s.pos[i] < s.pos[j]
}

func (s *stableSort) Swap(i, j int) {
    s.s.Swap(i, j)
    s.pos[i], s.pos[j] = s.pos[j], s.pos[i]
}
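The shim above makes any sort stable on pre-Go 1.2 toolchains by breaking ties on original position; go1_2.go (next) simply aliases sort.Stable, and the build tags pick one file per toolchain. A quick usage sketch of why stability matters here (standalone example, not vendored code):

    package main

    import (
        "fmt"
        "sort"
    )

    // byLen orders strings by length only, so equal-length strings are "ties"
    // whose relative order a stable sort must preserve.
    type byLen []string

    func (b byLen) Len() int           { return len(b) }
    func (b byLen) Swap(i, j int)      { b[i], b[j] = b[j], b[i] }
    func (b byLen) Less(i, j int) bool { return len(b[i]) < len(b[j]) }

    func main() {
        s := byLen{"bb", "aa", "c"}
        sort.Stable(s) // the shim above provides the same guarantee pre-Go 1.2
        fmt.Println(s) // [c bb aa] — "bb" stays before "aa"
    }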
11
vendor/golang.org/x/text/language/go1_2.go
generated
vendored
@ -1,11 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build go1.2

package language

import "sort"

var sortStable = sort.Stable
767
vendor/golang.org/x/text/language/index.go
generated
vendored
@ -1,767 +0,0 @@
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.

package language

// NumCompactTags is the number of common tags. The maximum tag is
// NumCompactTags-1.
const NumCompactTags = 752

var specialTags = []Tag{ // 2 elements
    0: {lang: 0xd5, region: 0x6d, script: 0x0, pVariant: 0x5, pExt: 0xe, str: "ca-ES-valencia"},
    1: {lang: 0x134, region: 0x134, script: 0x0, pVariant: 0x5, pExt: 0x5, str: "en-US-u-va-posix"},
} // Size: 72 bytes

var coreTags = map[uint32]uint16{
    0x0: 0, // und
    0x01500000: 3, // af
    0x015000d1: 4, // af-NA
    0x01500160: 5, // af-ZA
    0x01b00000: 6, // agq
    0x01b00051: 7, // agq-CM
    0x02000000: 8, // ak
    0x0200007f: 9, // ak-GH
    0x02600000: 10, // am
    0x0260006e: 11, // am-ET
    0x03900000: 12, // ar
    0x03900001: 13, // ar-001
    0x03900022: 14, // ar-AE
    0x03900038: 15, // ar-BH
    0x03900061: 16, // ar-DJ
    0x03900066: 17, // ar-DZ
    0x0390006a: 18, // ar-EG
    0x0390006b: 19, // ar-EH
    0x0390006c: 20, // ar-ER
    0x03900096: 21, // ar-IL
    0x0390009a: 22, // ar-IQ
    0x039000a0: 23, // ar-JO
    0x039000a7: 24, // ar-KM
    0x039000ab: 25, // ar-KW
    0x039000af: 26, // ar-LB
    0x039000b8: 27, // ar-LY
    0x039000b9: 28, // ar-MA
    0x039000c8: 29, // ar-MR
    0x039000e0: 30, // ar-OM
    0x039000ec: 31, // ar-PS
    0x039000f2: 32, // ar-QA
    0x03900107: 33, // ar-SA
    0x0390010a: 34, // ar-SD
    0x03900114: 35, // ar-SO
    0x03900116: 36, // ar-SS
    0x0390011b: 37, // ar-SY
    0x0390011f: 38, // ar-TD
    0x03900127: 39, // ar-TN
    0x0390015d: 40, // ar-YE
    0x03f00000: 41, // ars
    0x04200000: 42, // as
    0x04200098: 43, // as-IN
    0x04300000: 44, // asa
    0x0430012e: 45, // asa-TZ
    0x04700000: 46, // ast
    0x0470006d: 47, // ast-ES
    0x05700000: 48, // az
    0x0571e000: 49, // az-Cyrl
    0x0571e031: 50, // az-Cyrl-AZ
    0x05752000: 51, // az-Latn
    0x05752031: 52, // az-Latn-AZ
    0x05d00000: 53, // bas
    0x05d00051: 54, // bas-CM
    0x07000000: 55, // be
    0x07000046: 56, // be-BY
    0x07400000: 57, // bem
    0x07400161: 58, // bem-ZM
    0x07800000: 59, // bez
    0x0780012e: 60, // bez-TZ
    0x07d00000: 61, // bg
    0x07d00037: 62, // bg-BG
    0x08100000: 63, // bh
    0x09e00000: 64, // bm
    0x09e000c2: 65, // bm-ML
    0x0a300000: 66, // bn
    0x0a300034: 67, // bn-BD
    0x0a300098: 68, // bn-IN
    0x0a700000: 69, // bo
    0x0a700052: 70, // bo-CN
    0x0a700098: 71, // bo-IN
    0x0b000000: 72, // br
    0x0b000077: 73, // br-FR
    0x0b300000: 74, // brx
    0x0b300098: 75, // brx-IN
    0x0b500000: 76, // bs
    0x0b51e000: 77, // bs-Cyrl
    0x0b51e032: 78, // bs-Cyrl-BA
    0x0b552000: 79, // bs-Latn
    0x0b552032: 80, // bs-Latn-BA
    0x0d500000: 81, // ca
    0x0d500021: 82, // ca-AD
    0x0d50006d: 83, // ca-ES
    0x0d500077: 84, // ca-FR
    0x0d50009d: 85, // ca-IT
    0x0da00000: 86, // ce
    0x0da00105: 87, // ce-RU
    0x0dd00000: 88, // cgg
    0x0dd00130: 89, // cgg-UG
    0x0e300000: 90, // chr
    0x0e300134: 91, // chr-US
    0x0e700000: 92, // ckb
    0x0e70009a: 93, // ckb-IQ
    0x0e70009b: 94, // ckb-IR
    0x0f600000: 95, // cs
    0x0f60005d: 96, // cs-CZ
    0x0fa00000: 97, // cu
    0x0fa00105: 98, // cu-RU
    0x0fc00000: 99, // cy
    0x0fc0007a: 100, // cy-GB
    0x0fd00000: 101, // da
    0x0fd00062: 102, // da-DK
    0x0fd00081: 103, // da-GL
    0x10400000: 104, // dav
    0x104000a3: 105, // dav-KE
    0x10900000: 106, // de
    0x1090002d: 107, // de-AT
    0x10900035: 108, // de-BE
    0x1090004d: 109, // de-CH
    0x1090005f: 110, // de-DE
    0x1090009d: 111, // de-IT
    0x109000b1: 112, // de-LI
    0x109000b6: 113, // de-LU
    0x11300000: 114, // dje
    0x113000d3: 115, // dje-NE
    0x11b00000: 116, // dsb
    0x11b0005f: 117, // dsb-DE
    0x12000000: 118, // dua
    0x12000051: 119, // dua-CM
    0x12400000: 120, // dv
    0x12700000: 121, // dyo
    0x12700113: 122, // dyo-SN
    0x12900000: 123, // dz
    0x12900042: 124, // dz-BT
    0x12b00000: 125, // ebu
    0x12b000a3: 126, // ebu-KE
    0x12c00000: 127, // ee
    0x12c0007f: 128, // ee-GH
    0x12c00121: 129, // ee-TG
    0x13100000: 130, // el
    0x1310005c: 131, // el-CY
    0x13100086: 132, // el-GR
    0x13400000: 133, // en
    0x13400001: 134, // en-001
    0x1340001a: 135, // en-150
    0x13400024: 136, // en-AG
    0x13400025: 137, // en-AI
    0x1340002c: 138, // en-AS
    0x1340002d: 139, // en-AT
    0x1340002e: 140, // en-AU
    0x13400033: 141, // en-BB
    0x13400035: 142, // en-BE
    0x13400039: 143, // en-BI
    0x1340003c: 144, // en-BM
    0x13400041: 145, // en-BS
    0x13400045: 146, // en-BW
    0x13400047: 147, // en-BZ
    0x13400048: 148, // en-CA
    0x13400049: 149, // en-CC
    0x1340004d: 150, // en-CH
    0x1340004f: 151, // en-CK
    0x13400051: 152, // en-CM
    0x1340005b: 153, // en-CX
    0x1340005c: 154, // en-CY
    0x1340005f: 155, // en-DE
    0x13400060: 156, // en-DG
    0x13400062: 157, // en-DK
    0x13400063: 158, // en-DM
    0x1340006c: 159, // en-ER
    0x13400071: 160, // en-FI
    0x13400072: 161, // en-FJ
    0x13400073: 162, // en-FK
    0x13400074: 163, // en-FM
    0x1340007a: 164, // en-GB
    0x1340007b: 165, // en-GD
    0x1340007e: 166, // en-GG
    0x1340007f: 167, // en-GH
    0x13400080: 168, // en-GI
    0x13400082: 169, // en-GM
    0x13400089: 170, // en-GU
    0x1340008b: 171, // en-GY
    0x1340008c: 172, // en-HK
    0x13400095: 173, // en-IE
    0x13400096: 174, // en-IL
    0x13400097: 175, // en-IM
    0x13400098: 176, // en-IN
    0x13400099: 177, // en-IO
    0x1340009e: 178, // en-JE
    0x1340009f: 179, // en-JM
    0x134000a3: 180, // en-KE
    0x134000a6: 181, // en-KI
    0x134000a8: 182, // en-KN
    0x134000ac: 183, // en-KY
    0x134000b0: 184, // en-LC
    0x134000b3: 185, // en-LR
    0x134000b4: 186, // en-LS
    0x134000be: 187, // en-MG
    0x134000bf: 188, // en-MH
    0x134000c5: 189, // en-MO
    0x134000c6: 190, // en-MP
    0x134000c9: 191, // en-MS
    0x134000ca: 192, // en-MT
    0x134000cb: 193, // en-MU
    0x134000cd: 194, // en-MW
    0x134000cf: 195, // en-MY
    0x134000d1: 196, // en-NA
    0x134000d4: 197, // en-NF
    0x134000d5: 198, // en-NG
    0x134000d8: 199, // en-NL
    0x134000dc: 200, // en-NR
    0x134000de: 201, // en-NU
    0x134000df: 202, // en-NZ
    0x134000e5: 203, // en-PG
    0x134000e6: 204, // en-PH
    0x134000e7: 205, // en-PK
    0x134000ea: 206, // en-PN
    0x134000eb: 207, // en-PR
    0x134000ef: 208, // en-PW
    0x13400106: 209, // en-RW
    0x13400108: 210, // en-SB
    0x13400109: 211, // en-SC
    0x1340010a: 212, // en-SD
    0x1340010b: 213, // en-SE
    0x1340010c: 214, // en-SG
    0x1340010d: 215, // en-SH
    0x1340010e: 216, // en-SI
    0x13400111: 217, // en-SL
    0x13400116: 218, // en-SS
    0x1340011a: 219, // en-SX
    0x1340011c: 220, // en-SZ
    0x1340011e: 221, // en-TC
    0x13400124: 222, // en-TK
    0x13400128: 223, // en-TO
    0x1340012b: 224, // en-TT
    0x1340012c: 225, // en-TV
    0x1340012e: 226, // en-TZ
    0x13400130: 227, // en-UG
    0x13400132: 228, // en-UM
    0x13400134: 229, // en-US
    0x13400138: 230, // en-VC
    0x1340013b: 231, // en-VG
    0x1340013c: 232, // en-VI
    0x1340013e: 233, // en-VU
    0x13400141: 234, // en-WS
    0x13400160: 235, // en-ZA
    0x13400161: 236, // en-ZM
    0x13400163: 237, // en-ZW
    0x13700000: 238, // eo
    0x13700001: 239, // eo-001
    0x13900000: 240, // es
    0x1390001e: 241, // es-419
    0x1390002b: 242, // es-AR
    0x1390003e: 243, // es-BO
    0x13900040: 244, // es-BR
    0x13900050: 245, // es-CL
    0x13900053: 246, // es-CO
    0x13900055: 247, // es-CR
    0x13900058: 248, // es-CU
    0x13900064: 249, // es-DO
    0x13900067: 250, // es-EA
    0x13900068: 251, // es-EC
    0x1390006d: 252, // es-ES
    0x13900085: 253, // es-GQ
    0x13900088: 254, // es-GT
    0x1390008e: 255, // es-HN
    0x13900093: 256, // es-IC
    0x139000ce: 257, // es-MX
    0x139000d7: 258, // es-NI
    0x139000e1: 259, // es-PA
    0x139000e3: 260, // es-PE
    0x139000e6: 261, // es-PH
    0x139000eb: 262, // es-PR
    0x139000f0: 263, // es-PY
    0x13900119: 264, // es-SV
    0x13900134: 265, // es-US
    0x13900135: 266, // es-UY
    0x1390013a: 267, // es-VE
    0x13b00000: 268, // et
    0x13b00069: 269, // et-EE
    0x14000000: 270, // eu
    0x1400006d: 271, // eu-ES
    0x14100000: 272, // ewo
    0x14100051: 273, // ewo-CM
    0x14300000: 274, // fa
    0x14300023: 275, // fa-AF
    0x1430009b: 276, // fa-IR
    0x14900000: 277, // ff
    0x14900051: 278, // ff-CM
    0x14900083: 279, // ff-GN
    0x149000c8: 280, // ff-MR
    0x14900113: 281, // ff-SN
    0x14c00000: 282, // fi
    0x14c00071: 283, // fi-FI
    0x14e00000: 284, // fil
    0x14e000e6: 285, // fil-PH
    0x15300000: 286, // fo
    0x15300062: 287, // fo-DK
    0x15300075: 288, // fo-FO
    0x15900000: 289, // fr
    0x15900035: 290, // fr-BE
    0x15900036: 291, // fr-BF
    0x15900039: 292, // fr-BI
    0x1590003a: 293, // fr-BJ
    0x1590003b: 294, // fr-BL
    0x15900048: 295, // fr-CA
    0x1590004a: 296, // fr-CD
    0x1590004b: 297, // fr-CF
    0x1590004c: 298, // fr-CG
    0x1590004d: 299, // fr-CH
    0x1590004e: 300, // fr-CI
    0x15900051: 301, // fr-CM
    0x15900061: 302, // fr-DJ
    0x15900066: 303, // fr-DZ
    0x15900077: 304, // fr-FR
    0x15900079: 305, // fr-GA
    0x1590007d: 306, // fr-GF
    0x15900083: 307, // fr-GN
    0x15900084: 308, // fr-GP
    0x15900085: 309, // fr-GQ
    0x15900090: 310, // fr-HT
    0x159000a7: 311, // fr-KM
    0x159000b6: 312, // fr-LU
    0x159000b9: 313, // fr-MA
    0x159000ba: 314, // fr-MC
    0x159000bd: 315, // fr-MF
    0x159000be: 316, // fr-MG
    0x159000c2: 317, // fr-ML
    0x159000c7: 318, // fr-MQ
    0x159000c8: 319, // fr-MR
    0x159000cb: 320, // fr-MU
    0x159000d2: 321, // fr-NC
    0x159000d3: 322, // fr-NE
    0x159000e4: 323, // fr-PF
    0x159000e9: 324, // fr-PM
    0x15900101: 325, // fr-RE
    0x15900106: 326, // fr-RW
    0x15900109: 327, // fr-SC
    0x15900113: 328, // fr-SN
    0x1590011b: 329, // fr-SY
    0x1590011f: 330, // fr-TD
    0x15900121: 331, // fr-TG
    0x15900127: 332, // fr-TN
    0x1590013e: 333, // fr-VU
    0x1590013f: 334, // fr-WF
    0x1590015e: 335, // fr-YT
    0x16400000: 336, // fur
    0x1640009d: 337, // fur-IT
    0x16800000: 338, // fy
    0x168000d8: 339, // fy-NL
    0x16900000: 340, // ga
    0x16900095: 341, // ga-IE
    0x17800000: 342, // gd
    0x1780007a: 343, // gd-GB
    0x18a00000: 344, // gl
    0x18a0006d: 345, // gl-ES
    0x19c00000: 346, // gsw
    0x19c0004d: 347, // gsw-CH
    0x19c00077: 348, // gsw-FR
    0x19c000b1: 349, // gsw-LI
    0x19d00000: 350, // gu
    0x19d00098: 351, // gu-IN
    0x1a200000: 352, // guw
    0x1a400000: 353, // guz
    0x1a4000a3: 354, // guz-KE
    0x1a500000: 355, // gv
    0x1a500097: 356, // gv-IM
    0x1ad00000: 357, // ha
    0x1ad0007f: 358, // ha-GH
    0x1ad000d3: 359, // ha-NE
    0x1ad000d5: 360, // ha-NG
    0x1b100000: 361, // haw
    0x1b100134: 362, // haw-US
    0x1b500000: 363, // he
    0x1b500096: 364, // he-IL
    0x1b700000: 365, // hi
    0x1b700098: 366, // hi-IN
    0x1ca00000: 367, // hr
    0x1ca00032: 368, // hr-BA
    0x1ca0008f: 369, // hr-HR
    0x1cb00000: 370, // hsb
    0x1cb0005f: 371, // hsb-DE
    0x1ce00000: 372, // hu
    0x1ce00091: 373, // hu-HU
    0x1d000000: 374, // hy
    0x1d000027: 375, // hy-AM
    0x1da00000: 376, // id
    0x1da00094: 377, // id-ID
    0x1df00000: 378, // ig
    0x1df000d5: 379, // ig-NG
    0x1e200000: 380, // ii
    0x1e200052: 381, // ii-CN
    0x1f000000: 382, // is
    0x1f00009c: 383, // is-IS
    0x1f100000: 384, // it
    0x1f10004d: 385, // it-CH
    0x1f10009d: 386, // it-IT
    0x1f100112: 387, // it-SM
    0x1f200000: 388, // iu
    0x1f800000: 389, // ja
    0x1f8000a1: 390, // ja-JP
    0x1fb00000: 391, // jbo
    0x1ff00000: 392, // jgo
    0x1ff00051: 393, // jgo-CM
    0x20200000: 394, // jmc
    0x2020012e: 395, // jmc-TZ
    0x20600000: 396, // jv
    0x20800000: 397, // ka
    0x2080007c: 398, // ka-GE
    0x20a00000: 399, // kab
    0x20a00066: 400, // kab-DZ
    0x20e00000: 401, // kaj
    0x20f00000: 402, // kam
    0x20f000a3: 403, // kam-KE
    0x21700000: 404, // kcg
    0x21b00000: 405, // kde
    0x21b0012e: 406, // kde-TZ
    0x21f00000: 407, // kea
    0x21f00059: 408, // kea-CV
    0x22c00000: 409, // khq
    0x22c000c2: 410, // khq-ML
    0x23100000: 411, // ki
    0x231000a3: 412, // ki-KE
    0x23a00000: 413, // kk
    0x23a000ad: 414, // kk-KZ
    0x23c00000: 415, // kkj
    0x23c00051: 416, // kkj-CM
    0x23d00000: 417, // kl
    0x23d00081: 418, // kl-GL
    0x23e00000: 419, // kln
    0x23e000a3: 420, // kln-KE
    0x24200000: 421, // km
    0x242000a5: 422, // km-KH
    0x24900000: 423, // kn
    0x24900098: 424, // kn-IN
    0x24b00000: 425, // ko
    0x24b000a9: 426, // ko-KP
    0x24b000aa: 427, // ko-KR
    0x24d00000: 428, // kok
    0x24d00098: 429, // kok-IN
    0x26100000: 430, // ks
    0x26100098: 431, // ks-IN
    0x26200000: 432, // ksb
    0x2620012e: 433, // ksb-TZ
    0x26400000: 434, // ksf
    0x26400051: 435, // ksf-CM
    0x26500000: 436, // ksh
    0x2650005f: 437, // ksh-DE
    0x26b00000: 438, // ku
    0x27800000: 439, // kw
    0x2780007a: 440, // kw-GB
    0x28100000: 441, // ky
    0x281000a4: 442, // ky-KG
    0x28800000: 443, // lag
    0x2880012e: 444, // lag-TZ
    0x28c00000: 445, // lb
    0x28c000b6: 446, // lb-LU
    0x29a00000: 447, // lg
    0x29a00130: 448, // lg-UG
    0x2a600000: 449, // lkt
    0x2a600134: 450, // lkt-US
    0x2ac00000: 451, // ln
    0x2ac00029: 452, // ln-AO
    0x2ac0004a: 453, // ln-CD
    0x2ac0004b: 454, // ln-CF
    0x2ac0004c: 455, // ln-CG
    0x2af00000: 456, // lo
    0x2af000ae: 457, // lo-LA
    0x2b600000: 458, // lrc
    0x2b60009a: 459, // lrc-IQ
    0x2b60009b: 460, // lrc-IR
    0x2b700000: 461, // lt
    0x2b7000b5: 462, // lt-LT
    0x2b900000: 463, // lu
    0x2b90004a: 464, // lu-CD
    0x2bb00000: 465, // luo
    0x2bb000a3: 466, // luo-KE
    0x2bc00000: 467, // luy
    0x2bc000a3: 468, // luy-KE
    0x2be00000: 469, // lv
    0x2be000b7: 470, // lv-LV
    0x2c800000: 471, // mas
    0x2c8000a3: 472, // mas-KE
    0x2c80012e: 473, // mas-TZ
    0x2e000000: 474, // mer
    0x2e0000a3: 475, // mer-KE
    0x2e400000: 476, // mfe
    0x2e4000cb: 477, // mfe-MU
    0x2e800000: 478, // mg
    0x2e8000be: 479, // mg-MG
    0x2e900000: 480, // mgh
    0x2e9000d0: 481, // mgh-MZ
    0x2eb00000: 482, // mgo
    0x2eb00051: 483, // mgo-CM
    0x2f600000: 484, // mk
    0x2f6000c1: 485, // mk-MK
    0x2fb00000: 486, // ml
    0x2fb00098: 487, // ml-IN
    0x30200000: 488, // mn
    0x302000c4: 489, // mn-MN
    0x31200000: 490, // mr
    0x31200098: 491, // mr-IN
    0x31600000: 492, // ms
    0x3160003d: 493, // ms-BN
    0x316000cf: 494, // ms-MY
    0x3160010c: 495, // ms-SG
    0x31700000: 496, // mt
    0x317000ca: 497, // mt-MT
    0x31c00000: 498, // mua
    0x31c00051: 499, // mua-CM
    0x32800000: 500, // my
    0x328000c3: 501, // my-MM
    0x33100000: 502, // mzn
    0x3310009b: 503, // mzn-IR
    0x33800000: 504, // nah
    0x33c00000: 505, // naq
    0x33c000d1: 506, // naq-NA
    0x33e00000: 507, // nb
    0x33e000d9: 508, // nb-NO
    0x33e0010f: 509, // nb-SJ
    0x34500000: 510, // nd
    0x34500163: 511, // nd-ZW
    0x34700000: 512, // nds
    0x3470005f: 513, // nds-DE
    0x347000d8: 514, // nds-NL
    0x34800000: 515, // ne
    0x34800098: 516, // ne-IN
    0x348000da: 517, // ne-NP
    0x35e00000: 518, // nl
    0x35e0002f: 519, // nl-AW
    0x35e00035: 520, // nl-BE
    0x35e0003f: 521, // nl-BQ
    0x35e0005a: 522, // nl-CW
    0x35e000d8: 523, // nl-NL
    0x35e00115: 524, // nl-SR
    0x35e0011a: 525, // nl-SX
    0x35f00000: 526, // nmg
    0x35f00051: 527, // nmg-CM
    0x36100000: 528, // nn
    0x361000d9: 529, // nn-NO
    0x36300000: 530, // nnh
    0x36300051: 531, // nnh-CM
    0x36600000: 532, // no
    0x36c00000: 533, // nqo
    0x36d00000: 534, // nr
    0x37100000: 535, // nso
    0x37700000: 536, // nus
    0x37700116: 537, // nus-SS
    0x37e00000: 538, // ny
    0x38000000: 539, // nyn
    0x38000130: 540, // nyn-UG
    0x38700000: 541, // om
    0x3870006e: 542, // om-ET
    0x387000a3: 543, // om-KE
    0x38c00000: 544, // or
    0x38c00098: 545, // or-IN
    0x38f00000: 546, // os
    0x38f0007c: 547, // os-GE
    0x38f00105: 548, // os-RU
    0x39400000: 549, // pa
    0x39405000: 550, // pa-Arab
    0x394050e7: 551, // pa-Arab-PK
    0x3942f000: 552, // pa-Guru
    0x3942f098: 553, // pa-Guru-IN
    0x39800000: 554, // pap
    0x3aa00000: 555, // pl
    0x3aa000e8: 556, // pl-PL
    0x3b400000: 557, // prg
    0x3b400001: 558, // prg-001
    0x3b500000: 559, // ps
    0x3b500023: 560, // ps-AF
    0x3b700000: 561, // pt
    0x3b700029: 562, // pt-AO
    0x3b700040: 563, // pt-BR
    0x3b70004d: 564, // pt-CH
    0x3b700059: 565, // pt-CV
    0x3b700085: 566, // pt-GQ
    0x3b70008a: 567, // pt-GW
    0x3b7000b6: 568, // pt-LU
    0x3b7000c5: 569, // pt-MO
    0x3b7000d0: 570, // pt-MZ
    0x3b7000ed: 571, // pt-PT
    0x3b700117: 572, // pt-ST
    0x3b700125: 573, // pt-TL
    0x3bb00000: 574, // qu
    0x3bb0003e: 575, // qu-BO
    0x3bb00068: 576, // qu-EC
    0x3bb000e3: 577, // qu-PE
    0x3cb00000: 578, // rm
    0x3cb0004d: 579, // rm-CH
    0x3d000000: 580, // rn
    0x3d000039: 581, // rn-BI
    0x3d300000: 582, // ro
    0x3d3000bb: 583, // ro-MD
    0x3d300103: 584, // ro-RO
    0x3d500000: 585, // rof
    0x3d50012e: 586, // rof-TZ
    0x3d900000: 587, // ru
    0x3d900046: 588, // ru-BY
    0x3d9000a4: 589, // ru-KG
    0x3d9000ad: 590, // ru-KZ
    0x3d9000bb: 591, // ru-MD
    0x3d900105: 592, // ru-RU
    0x3d90012f: 593, // ru-UA
    0x3dc00000: 594, // rw
    0x3dc00106: 595, // rw-RW
    0x3dd00000: 596, // rwk
    0x3dd0012e: 597, // rwk-TZ
    0x3e200000: 598, // sah
    0x3e200105: 599, // sah-RU
    0x3e300000: 600, // saq
    0x3e3000a3: 601, // saq-KE
    0x3e900000: 602, // sbp
    0x3e90012e: 603, // sbp-TZ
    0x3f200000: 604, // sdh
    0x3f300000: 605, // se
    0x3f300071: 606, // se-FI
    0x3f3000d9: 607, // se-NO
    0x3f30010b: 608, // se-SE
    0x3f500000: 609, // seh
    0x3f5000d0: 610, // seh-MZ
    0x3f700000: 611, // ses
    0x3f7000c2: 612, // ses-ML
    0x3f800000: 613, // sg
    0x3f80004b: 614, // sg-CF
    0x3fe00000: 615, // shi
    0x3fe52000: 616, // shi-Latn
    0x3fe520b9: 617, // shi-Latn-MA
    0x3fed2000: 618, // shi-Tfng
    0x3fed20b9: 619, // shi-Tfng-MA
    0x40200000: 620, // si
    0x402000b2: 621, // si-LK
    0x40800000: 622, // sk
    0x40800110: 623, // sk-SK
    0x40c00000: 624, // sl
    0x40c0010e: 625, // sl-SI
    0x41200000: 626, // sma
    0x41300000: 627, // smi
    0x41400000: 628, // smj
    0x41500000: 629, // smn
    0x41500071: 630, // smn-FI
    0x41800000: 631, // sms
    0x41900000: 632, // sn
    0x41900163: 633, // sn-ZW
    0x41f00000: 634, // so
    0x41f00061: 635, // so-DJ
    0x41f0006e: 636, // so-ET
    0x41f000a3: 637, // so-KE
    0x41f00114: 638, // so-SO
    0x42700000: 639, // sq
    0x42700026: 640, // sq-AL
    0x427000c1: 641, // sq-MK
    0x4270014c: 642, // sq-XK
    0x42800000: 643, // sr
    0x4281e000: 644, // sr-Cyrl
    0x4281e032: 645, // sr-Cyrl-BA
    0x4281e0bc: 646, // sr-Cyrl-ME
    0x4281e104: 647, // sr-Cyrl-RS
    0x4281e14c: 648, // sr-Cyrl-XK
    0x42852000: 649, // sr-Latn
    0x42852032: 650, // sr-Latn-BA
    0x428520bc: 651, // sr-Latn-ME
    0x42852104: 652, // sr-Latn-RS
    0x4285214c: 653, // sr-Latn-XK
    0x42d00000: 654, // ss
    0x43000000: 655, // ssy
    0x43100000: 656, // st
    0x43a00000: 657, // sv
    0x43a00030: 658, // sv-AX
    0x43a00071: 659, // sv-FI
    0x43a0010b: 660, // sv-SE
    0x43b00000: 661, // sw
    0x43b0004a: 662, // sw-CD
    0x43b000a3: 663, // sw-KE
    0x43b0012e: 664, // sw-TZ
    0x43b00130: 665, // sw-UG
    0x44400000: 666, // syr
    0x44600000: 667, // ta
    0x44600098: 668, // ta-IN
    0x446000b2: 669, // ta-LK
    0x446000cf: 670, // ta-MY
    0x4460010c: 671, // ta-SG
    0x45700000: 672, // te
    0x45700098: 673, // te-IN
    0x45a00000: 674, // teo
    0x45a000a3: 675, // teo-KE
    0x45a00130: 676, // teo-UG
    0x46100000: 677, // th
    0x46100122: 678, // th-TH
    0x46500000: 679, // ti
    0x4650006c: 680, // ti-ER
    0x4650006e: 681, // ti-ET
    0x46700000: 682, // tig
    0x46c00000: 683, // tk
    0x46c00126: 684, // tk-TM
    0x47600000: 685, // tn
    0x47800000: 686, // to
    0x47800128: 687, // to-TO
    0x48000000: 688, // tr
    0x4800005c: 689, // tr-CY
    0x4800012a: 690, // tr-TR
    0x48400000: 691, // ts
    0x49a00000: 692, // twq
    0x49a000d3: 693, // twq-NE
    0x49f00000: 694, // tzm
    0x49f000b9: 695, // tzm-MA
    0x4a200000: 696, // ug
    0x4a200052: 697, // ug-CN
    0x4a400000: 698, // uk
    0x4a40012f: 699, // uk-UA
    0x4aa00000: 700, // ur
    0x4aa00098: 701, // ur-IN
    0x4aa000e7: 702, // ur-PK
    0x4b200000: 703, // uz
    0x4b205000: 704, // uz-Arab
    0x4b205023: 705, // uz-Arab-AF
    0x4b21e000: 706, // uz-Cyrl
    0x4b21e136: 707, // uz-Cyrl-UZ
    0x4b252000: 708, // uz-Latn
    0x4b252136: 709, // uz-Latn-UZ
    0x4b400000: 710, // vai
    0x4b452000: 711, // vai-Latn
    0x4b4520b3: 712, // vai-Latn-LR
    0x4b4d9000: 713, // vai-Vaii
    0x4b4d90b3: 714, // vai-Vaii-LR
    0x4b600000: 715, // ve
    0x4b900000: 716, // vi
    0x4b90013d: 717, // vi-VN
    0x4bf00000: 718, // vo
    0x4bf00001: 719, // vo-001
    0x4c200000: 720, // vun
    0x4c20012e: 721, // vun-TZ
    0x4c400000: 722, // wa
    0x4c500000: 723, // wae
    0x4c50004d: 724, // wae-CH
    0x4db00000: 725, // wo
    0x4e800000: 726, // xh
    0x4f100000: 727, // xog
    0x4f100130: 728, // xog-UG
    0x4ff00000: 729, // yav
    0x4ff00051: 730, // yav-CM
    0x50800000: 731, // yi
    0x50800001: 732, // yi-001
    0x50e00000: 733, // yo
    0x50e0003a: 734, // yo-BJ
    0x50e000d5: 735, // yo-NG
    0x51500000: 736, // yue
    0x5150008c: 737, // yue-HK
    0x51e00000: 738, // zgh
    0x51e000b9: 739, // zgh-MA
    0x51f00000: 740, // zh
    0x51f34000: 741, // zh-Hans
    0x51f34052: 742, // zh-Hans-CN
    0x51f3408c: 743, // zh-Hans-HK
    0x51f340c5: 744, // zh-Hans-MO
    0x51f3410c: 745, // zh-Hans-SG
    0x51f35000: 746, // zh-Hant
    0x51f3508c: 747, // zh-Hant-HK
    0x51f350c5: 748, // zh-Hant-MO
    0x51f3512d: 749, // zh-Hant-TW
    0x52400000: 750, // zu
    0x52400160: 751, // zu-ZA
}

// Total table size 4580 bytes (4KiB); checksum: A7F72A2A
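Judging from the entries above, the coreTags keys appear to pack the numeric lang, script, and region IDs into one uint32, roughly as lang<<20 | script<<12 | region (e.g. 0x0571e031 is az (0x57), Cyrl (0x1e), AZ (0x31)). A sketch of decoding such a key under that assumption; the decode helper is illustrative, not part of the vendored package:

    package main

    import "fmt"

    // decode splits a packed coreTags-style key into its subtag IDs, assuming
    // the layout lang<<20 | script<<12 | region inferred from the table above.
    func decode(key uint32) (lang, script, region uint16) {
        return uint16(key >> 20), uint16((key >> 12) & 0xff), uint16(key & 0xfff)
    }

    func main() {
        lang, script, region := decode(0x0571e031) // az-Cyrl-AZ in the table above
        fmt.Printf("lang=0x%x script=0x%x region=0x%x\n", lang, script, region)
        // lang=0x57 script=0x1e region=0x31
    }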
975
vendor/golang.org/x/text/language/language.go
generated
vendored
@ -1,975 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
//go:generate go run maketables.go gen_common.go -output tables.go
|
|
||||||
//go:generate go run gen_index.go
|
|
||||||
|
|
||||||
// Package language implements BCP 47 language tags and related functionality.
|
|
||||||
//
|
|
||||||
// The Tag type, which is used to represent languages, is agnostic to the
|
|
||||||
// meaning of its subtags. Tags are not fully canonicalized to preserve
|
|
||||||
// information that may be valuable in certain contexts. As a consequence, two
|
|
||||||
// different tags may represent identical languages.
|
|
||||||
//
|
|
||||||
// Initializing language- or locale-specific components usually consists of
|
|
||||||
// two steps. The first step is to select a display language based on the
|
|
||||||
// preferred languages of the user and the languages supported by an application.
|
|
||||||
// The second step is to create the language-specific services based on
|
|
||||||
// this selection. Each is discussed in more details below.
|
|
||||||
//
|
|
||||||
// Matching preferred against supported languages
|
|
||||||
//
|
|
||||||
// An application may support various languages. This list is typically limited
|
|
||||||
// by the languages for which there exists translations of the user interface.
|
|
||||||
// Similarly, a user may provide a list of preferred languages which is limited
|
|
||||||
// by the languages understood by this user.
|
|
||||||
// An application should use a Matcher to find the best supported language based
|
|
||||||
// on the user's preferred list.
|
|
||||||
// Matchers are aware of the intricacies of equivalence between languages.
|
|
||||||
// The default Matcher implementation takes into account things such as
|
|
||||||
// deprecated subtags, legacy tags, and mutual intelligibility between scripts
|
|
||||||
// and languages.
|
|
||||||
//
|
|
||||||
// A Matcher for English, Australian English, Danish, and standard Mandarin can
|
|
||||||
// be defined as follows:
|
|
||||||
//
|
|
||||||
// var matcher = language.NewMatcher([]language.Tag{
|
|
||||||
// language.English, // The first language is used as fallback.
|
|
||||||
// language.MustParse("en-AU"),
|
|
||||||
// language.Danish,
|
|
||||||
// language.Chinese,
|
|
||||||
// })
|
|
||||||
//
|
|
||||||
// The following code selects the best match for someone speaking Spanish and
|
|
||||||
// Norwegian:
|
|
||||||
//
|
|
||||||
// preferred := []language.Tag{ language.Spanish, language.Norwegian }
|
|
||||||
// tag, _, _ := matcher.Match(preferred...)
|
|
||||||
//
|
|
||||||
// In this case, the best match is Danish, as Danish is sufficiently a match to
|
|
||||||
// Norwegian to not have to fall back to the default.
|
|
||||||
// See ParseAcceptLanguage on how to handle the Accept-Language HTTP header.
|
|
||||||
//
|
|
||||||
// Selecting language-specific services
|
|
||||||
//
|
|
||||||
// One should always use the Tag returned by the Matcher to create an instance
|
|
||||||
// of any of the language-specific services provided by the text repository.
|
|
||||||
// This prevents the mixing of languages, such as having a different language for
|
|
||||||
// messages and display names, as well as improper casing or sorting order for
|
|
||||||
// the selected language.
|
|
||||||
// Using the returned Tag also allows user-defined settings, such as collation
|
|
||||||
// order or numbering system to be transparently passed as options.
|
|
||||||
//
|
|
||||||
// If you have language-specific data in your application, however, it will in
|
|
||||||
// most cases suffice to use the index returned by the matcher to identify
|
|
||||||
// the user language.
|
|
||||||
// The following loop provides an alternative in case this is not sufficient:
|
|
||||||
//
|
|
||||||
// supported := map[language.Tag]data{
|
|
||||||
// language.English: enData,
|
|
||||||
// language.MustParse("en-AU"): enAUData,
|
|
||||||
// language.Danish: daData,
|
|
||||||
// language.Chinese: zhData,
|
|
||||||
// }
|
|
||||||
// tag, _, _ := matcher.Match(preferred...)
|
|
||||||
// for ; tag != language.Und; tag = tag.Parent() {
|
|
||||||
// if v, ok := supported[tag]; ok {
|
|
||||||
// return v
|
|
||||||
// }
|
|
||||||
// }
|
|
||||||
// return enData // should not reach here
|
|
||||||
//
|
|
||||||
// Repeatedly taking the Parent of the tag returned by Match will eventually
|
|
||||||
// match one of the tags used to initialize the Matcher.
|
|
||||||
//
|
|
||||||
// Canonicalization
|
|
||||||
//
|
|
||||||
// By default, only legacy and deprecated tags are converted into their
|
|
||||||
// canonical equivalent. All other information is preserved. This approach makes
|
|
||||||
// the confidence scores more accurate and allows matchers to distinguish
|
|
||||||
// between variants that are otherwise lost.
|
|
||||||
//
|
|
||||||
// As a consequence, two tags that should be treated as identical according to
|
|
||||||
// BCP 47 or CLDR, like "en-Latn" and "en", will be represented differently. The
|
|
||||||
// Matchers will handle such distinctions, though, and are aware of the
|
|
||||||
// equivalence relations. The CanonType type can be used to alter the
|
|
||||||
// canonicalization form.
|
|
||||||
//
|
|
||||||
// References
|
|
||||||
//
|
|
||||||
// BCP 47 - Tags for Identifying Languages
|
|
||||||
// http://tools.ietf.org/html/bcp47
|
|
||||||
package language // import "golang.org/x/text/language"
|
|
||||||
|
|
||||||
// TODO: Remove above NOTE after:
|
|
||||||
// - verifying that tables are dropped correctly (most notably matcher tables).
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
// maxCoreSize is the maximum size of a BCP 47 tag without variants and
|
|
||||||
// extensions. Equals max lang (3) + script (4) + max reg (3) + 2 dashes.
|
|
||||||
maxCoreSize = 12
|
|
||||||
|
|
||||||
// max99thPercentileSize is a somewhat arbitrary buffer size that presumably
|
|
||||||
// is large enough to hold at least 99% of the BCP 47 tags.
|
|
||||||
max99thPercentileSize = 32
|
|
||||||
|
|
||||||
// maxSimpleUExtensionSize is the maximum size of a -u extension with one
|
|
||||||
// key-type pair. Equals len("-u-") + key (2) + dash + max value (8).
|
|
||||||
maxSimpleUExtensionSize = 14
|
|
||||||
)
|
|
||||||
|
|
||||||
// Tag represents a BCP 47 language tag. It is used to specify an instance of a
|
|
||||||
// specific language or locale. All language tag values are guaranteed to be
|
|
||||||
// well-formed.
|
|
||||||
type Tag struct {
|
|
||||||
lang langID
|
|
||||||
region regionID
|
|
||||||
script scriptID
|
|
||||||
pVariant byte // offset in str, includes preceding '-'
|
|
||||||
pExt uint16 // offset of first extension, includes preceding '-'
|
|
||||||
|
|
||||||
// str is the string representation of the Tag. It will only be used if the
|
|
||||||
// tag has variants or extensions.
|
|
||||||
str string
|
|
||||||
}
|
|
||||||
|
|
||||||
// Make is a convenience wrapper for Parse that omits the error.
|
|
||||||
// In case of an error, a sensible default is returned.
|
|
||||||
func Make(s string) Tag {
|
|
||||||
return Default.Make(s)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Make is a convenience wrapper for c.Parse that omits the error.
|
|
||||||
// In case of an error, a sensible default is returned.
|
|
||||||
func (c CanonType) Make(s string) Tag {
|
|
||||||
t, _ := c.Parse(s)
|
|
||||||
return t
|
|
||||||
}
|
|
||||||
|
|
||||||
// Raw returns the raw base language, script and region, without making an
|
|
||||||
// attempt to infer their values.
|
|
||||||
func (t Tag) Raw() (b Base, s Script, r Region) {
|
|
||||||
return Base{t.lang}, Script{t.script}, Region{t.region}
|
|
||||||
}
|
|
||||||
|
|
||||||
// equalTags compares language, script and region subtags only.
|
|
||||||
func (t Tag) equalTags(a Tag) bool {
|
|
||||||
return t.lang == a.lang && t.script == a.script && t.region == a.region
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsRoot returns true if t is equal to language "und".
|
|
||||||
func (t Tag) IsRoot() bool {
|
|
||||||
if int(t.pVariant) < len(t.str) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return t.equalTags(und)
|
|
||||||
}
|
|
||||||
|
|
||||||
// private reports whether the Tag consists solely of a private use tag.
|
|
||||||
func (t Tag) private() bool {
|
|
||||||
return t.str != "" && t.pVariant == 0
|
|
||||||
}
|
|
||||||
|
|
||||||

// CanonType can be used to enable or disable various types of canonicalization.
type CanonType int

const (
	// Replace deprecated base languages with their preferred replacements.
	DeprecatedBase CanonType = 1 << iota
	// Replace deprecated scripts with their preferred replacements.
	DeprecatedScript
	// Replace deprecated regions with their preferred replacements.
	DeprecatedRegion
	// Remove redundant scripts.
	SuppressScript
	// Normalize legacy encodings. This includes legacy languages defined in
	// CLDR as well as bibliographic codes defined in ISO-639.
	Legacy
	// Map the dominant language of a macro language group to the macro language
	// subtag. For example cmn -> zh.
	Macro
	// The CLDR flag should be used if full compatibility with CLDR is required.
	// There are a few cases where language.Tag may differ from CLDR. To follow all
	// of CLDR's suggestions, use All|CLDR.
	CLDR

	// Raw can be used to Compose or Parse without Canonicalization.
	Raw CanonType = 0

	// Replace all deprecated tags with their preferred replacements.
	Deprecated = DeprecatedBase | DeprecatedScript | DeprecatedRegion

	// All canonicalizations recommended by BCP 47.
	BCP47 = Deprecated | SuppressScript

	// All canonicalizations.
	All = BCP47 | Legacy | Macro

	// Default is the canonicalization used by Parse, Make and Compose. To
	// preserve as much information as possible, canonicalizations that remove
	// potentially valuable information are not included. The Matcher is
	// designed to recognize similar tags that would be the same if
	// they were canonicalized using All.
	Default = Deprecated | Legacy

	canonLang = DeprecatedBase | Legacy | Macro

	// TODO: LikelyScript, LikelyRegion: suppress similar to ICU.
)

// canonicalize returns the canonicalized equivalent of the tag and
// whether there was any change.
func (t Tag) canonicalize(c CanonType) (Tag, bool) {
	if c == Raw {
		return t, false
	}
	changed := false
	if c&SuppressScript != 0 {
		if t.lang < langNoIndexOffset && uint8(t.script) == suppressScript[t.lang] {
			t.script = 0
			changed = true
		}
	}
	if c&canonLang != 0 {
		for {
			if l, aliasType := normLang(t.lang); l != t.lang {
				switch aliasType {
				case langLegacy:
					if c&Legacy != 0 {
						if t.lang == _sh && t.script == 0 {
							t.script = _Latn
						}
						t.lang = l
						changed = true
					}
				case langMacro:
					if c&Macro != 0 {
						// We deviate here from CLDR. The mapping "nb" -> "no"
						// qualifies as a typical Macro language mapping. However,
						// for legacy reasons, CLDR maps "no", the macro language
						// code for Norwegian, to the dominant variant "nb". This
						// change is currently under consideration for CLDR as well.
						// See http://unicode.org/cldr/trac/ticket/2698 and also
						// http://unicode.org/cldr/trac/ticket/1790 for some of the
						// practical implications. TODO: this check could be removed
						// if CLDR adopts this change.
						if c&CLDR == 0 || t.lang != _nb {
							changed = true
							t.lang = l
						}
					}
				case langDeprecated:
					if c&DeprecatedBase != 0 {
						if t.lang == _mo && t.region == 0 {
							t.region = _MD
						}
						t.lang = l
						changed = true
						// Other canonicalization types may still apply.
						continue
					}
				}
			} else if c&Legacy != 0 && t.lang == _no && c&CLDR != 0 {
				t.lang = _nb
				changed = true
			}
			break
		}
	}
	if c&DeprecatedScript != 0 {
		if t.script == _Qaai {
			changed = true
			t.script = _Zinh
		}
	}
	if c&DeprecatedRegion != 0 {
		if r := normRegion(t.region); r != 0 {
			changed = true
			t.region = r
		}
	}
	return t, changed
}

// Canonicalize returns the canonicalized equivalent of the tag.
func (c CanonType) Canonicalize(t Tag) (Tag, error) {
	t, changed := t.canonicalize(c)
	if changed {
		t.remakeString()
	}
	return t, nil
}
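
// Usage sketch for CanonType: parsing with Raw preserves the input verbatim,
// and a canonicalization can then be applied explicitly. The mapped outputs
// below follow the rules documented above (deprecated "iw" -> "he", macro
// "cmn" -> "zh"; All includes both mappings):
//
//	t, _ := language.Raw.Parse("iw")
//	t, _ = language.All.Canonicalize(t)
//	fmt.Println(t) // he
//
//	t, _ = language.Raw.Parse("cmn")
//	t, _ = language.All.Canonicalize(t)
//	fmt.Println(t) // zh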

// Confidence indicates the level of certainty for a given return value.
// For example, Serbian may be written in Cyrillic or Latin script.
// The confidence level indicates whether a value was explicitly specified,
// whether it is typically the only possible value, or whether there is
// an ambiguity.
type Confidence int

const (
	No    Confidence = iota // full confidence that there was no match
	Low                     // most likely value picked out of a set of alternatives
	High                    // value is generally assumed to be the correct match
	Exact                   // exact match or explicitly specified value
)

var confName = []string{"No", "Low", "High", "Exact"}

func (c Confidence) String() string {
	return confName[c]
}

// remakeString is used to update t.str in case lang, script or region changed.
// It is assumed that pExt and pVariant still point to the start of the
// respective parts.
func (t *Tag) remakeString() {
	if t.str == "" {
		return
	}
	extra := t.str[t.pVariant:]
	if t.pVariant > 0 {
		extra = extra[1:]
	}
	if t.equalTags(und) && strings.HasPrefix(extra, "x-") {
		t.str = extra
		t.pVariant = 0
		t.pExt = 0
		return
	}
	var buf [max99thPercentileSize]byte // avoid extra memory allocation in most cases.
	b := buf[:t.genCoreBytes(buf[:])]
	if extra != "" {
		diff := len(b) - int(t.pVariant)
		b = append(b, '-')
		b = append(b, extra...)
		t.pVariant = uint8(int(t.pVariant) + diff)
		t.pExt = uint16(int(t.pExt) + diff)
	} else {
		t.pVariant = uint8(len(b))
		t.pExt = uint16(len(b))
	}
	t.str = string(b)
}

// genCoreBytes writes a string for the base language, script and region tags
// to the given buffer and returns the number of bytes written. It will never
// write more than maxCoreSize bytes.
func (t *Tag) genCoreBytes(buf []byte) int {
	n := t.lang.stringToBuf(buf[:])
	if t.script != 0 {
		n += copy(buf[n:], "-")
		n += copy(buf[n:], t.script.String())
	}
	if t.region != 0 {
		n += copy(buf[n:], "-")
		n += copy(buf[n:], t.region.String())
	}
	return n
}

// String returns the canonical string representation of the language tag.
func (t Tag) String() string {
	if t.str != "" {
		return t.str
	}
	if t.script == 0 && t.region == 0 {
		return t.lang.String()
	}
	buf := [maxCoreSize]byte{}
	return string(buf[:t.genCoreBytes(buf[:])])
}

// Base returns the base language of the language tag. If the base language is
// unspecified, an attempt will be made to infer it from the context.
// It uses a variant of CLDR's Add Likely Subtags algorithm. This is subject to change.
func (t Tag) Base() (Base, Confidence) {
	if t.lang != 0 {
		return Base{t.lang}, Exact
	}
	c := High
	if t.script == 0 && !(Region{t.region}).IsCountry() {
		c = Low
	}
	if tag, err := addTags(t); err == nil && tag.lang != 0 {
		return Base{tag.lang}, c
	}
	return Base{0}, No
}

// Script infers the script for the language tag. If it was not explicitly given, it will infer
// a most likely candidate.
// If more than one script is commonly used for a language, the most likely one
// is returned with a low confidence indication. For example, it returns (Cyrl, Low)
// for Serbian.
// If a script cannot be inferred, (Zzzz, No) is returned. We do not use Zyyy (undetermined)
// as one would suspect from the IANA registry for BCP 47. In a Unicode context Zyyy marks
// common characters (like 1, 2, 3, '.', etc.) and is therefore more like multiple scripts.
// See http://www.unicode.org/reports/tr24/#Values for more details. Zzzz is also used for
// unknown value in CLDR. (Zzzz, Exact) is returned if Zzzz was explicitly specified.
// Note that an inferred script is never guaranteed to be the correct one. Latin is
// almost exclusively used for Afrikaans, but Arabic has been used for some texts
// in the past. Also, the script that is commonly used may change over time.
// It uses a variant of CLDR's Add Likely Subtags algorithm. This is subject to change.
func (t Tag) Script() (Script, Confidence) {
	if t.script != 0 {
		return Script{t.script}, Exact
	}
	sc, c := scriptID(_Zzzz), No
	if t.lang < langNoIndexOffset {
		if scr := scriptID(suppressScript[t.lang]); scr != 0 {
			// Note: it is not always the case that a language with a suppress
			// script value is only written in one script (e.g. kk, ms, pa).
			if t.region == 0 {
				return Script{scriptID(scr)}, High
			}
			sc, c = scr, High
		}
	}
	if tag, err := addTags(t); err == nil {
		if tag.script != sc {
			sc, c = tag.script, Low
		}
	} else {
		t, _ = (Deprecated | Macro).Canonicalize(t)
		if tag, err := addTags(t); err == nil && tag.script != sc {
			sc, c = tag.script, Low
		}
	}
	return Script{sc}, c
}

// Region returns the region for the language tag. If it was not explicitly given, it will
// infer a most likely candidate from the context.
// It uses a variant of CLDR's Add Likely Subtags algorithm. This is subject to change.
func (t Tag) Region() (Region, Confidence) {
	if t.region != 0 {
		return Region{t.region}, Exact
	}
	if t, err := addTags(t); err == nil {
		return Region{t.region}, Low // TODO: differentiate between high and low.
	}
	t, _ = (Deprecated | Macro).Canonicalize(t)
	if tag, err := addTags(t); err == nil {
		return Region{tag.region}, Low
	}
	return Region{_ZZ}, No // TODO: return world instead of undetermined?
}
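
// Usage sketch: the three inference methods and their confidence levels for a
// tag that only specifies a base language (outputs are illustrative; Serbian
// is written in more than one script, hence the Low confidence):
//
//	t := language.Make("sr")
//	b, c1 := t.Base()
//	s, c2 := t.Script()
//	r, c3 := t.Region()
//	fmt.Println(b, c1) // sr Exact
//	fmt.Println(s, c2) // Cyrl Low
//	fmt.Println(r, c3) // RS Low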

// Variants returns the variants specified explicitly for this language tag
// or nil if no variant was specified.
func (t Tag) Variants() []Variant {
	v := []Variant{}
	if int(t.pVariant) < int(t.pExt) {
		for x, str := "", t.str[t.pVariant:t.pExt]; str != ""; {
			x, str = nextToken(str)
			v = append(v, Variant{x})
		}
	}
	return v
}

// Parent returns the CLDR parent of t. In CLDR, missing fields in data for a
// specific language are substituted with fields from the parent language.
// The parent for a language may change for newer versions of CLDR.
func (t Tag) Parent() Tag {
	if t.str != "" {
		// Strip the variants and extensions.
		t, _ = Raw.Compose(t.Raw())
		if t.region == 0 && t.script != 0 && t.lang != 0 {
			base, _ := addTags(Tag{lang: t.lang})
			if base.script == t.script {
				return Tag{lang: t.lang}
			}
		}
		return t
	}
	if t.lang != 0 {
		if t.region != 0 {
			maxScript := t.script
			if maxScript == 0 {
				max, _ := addTags(t)
				maxScript = max.script
			}

			for i := range parents {
				if langID(parents[i].lang) == t.lang && scriptID(parents[i].maxScript) == maxScript {
					for _, r := range parents[i].fromRegion {
						if regionID(r) == t.region {
							return Tag{
								lang:   t.lang,
								script: scriptID(parents[i].script),
								region: regionID(parents[i].toRegion),
							}
						}
					}
				}
			}

			// Strip the script if it is the default one.
			base, _ := addTags(Tag{lang: t.lang})
			if base.script != maxScript {
				return Tag{lang: t.lang, script: maxScript}
			}
			return Tag{lang: t.lang}
		} else if t.script != 0 {
			// The parent for a base-script pair with a non-default script is
			// "und" instead of the base language.
			base, _ := addTags(Tag{lang: t.lang})
			if base.script != t.script {
				return und
			}
			return Tag{lang: t.lang}
		}
	}
	return und
}
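
// Usage sketch: walking the CLDR parent chain up to the root. The exact chain
// depends on the CLDR version backing the package:
//
//	for t := language.Make("en-GB"); !t.IsRoot(); t = t.Parent() {
//		fmt.Println(t)
//	}
//	// Prints something like: en-GB, then en.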

// nextToken returns the token following the leading hyphen of s, and the
// rest of the string.
func nextToken(s string) (t, tail string) {
	p := strings.Index(s[1:], "-")
	if p == -1 {
		return s[1:], ""
	}
	p++
	return s[1:p], s[p:]
}

// Extension is a single BCP 47 extension.
type Extension struct {
	s string
}

// String returns the string representation of the extension, including the
// type tag.
func (e Extension) String() string {
	return e.s
}

// ParseExtension parses s as an extension and returns it on success.
func ParseExtension(s string) (e Extension, err error) {
	scan := makeScannerString(s)
	var end int
	if n := len(scan.token); n != 1 {
		return Extension{}, errSyntax
	}
	scan.toLower(0, len(scan.b))
	end = parseExtension(&scan)
	if end != len(s) {
		return Extension{}, errSyntax
	}
	return Extension{string(scan.b)}, nil
}

// Type returns the one-byte extension type of e. It returns 0 for the zero
// extension.
func (e Extension) Type() byte {
	if e.s == "" {
		return 0
	}
	return e.s[0]
}

// Tokens returns the list of tokens of e.
func (e Extension) Tokens() []string {
	return strings.Split(e.s, "-")
}

// Extension returns the extension of type x for tag t. It will return
// false for ok if t does not have the requested extension. The returned
// extension will be invalid in this case.
func (t Tag) Extension(x byte) (ext Extension, ok bool) {
	for i := int(t.pExt); i < len(t.str)-1; {
		var ext string
		i, ext = getExtension(t.str, i)
		if ext[0] == x {
			return Extension{ext}, true
		}
	}
	return Extension{}, false
}

// Extensions returns all extensions of t.
func (t Tag) Extensions() []Extension {
	e := []Extension{}
	for i := int(t.pExt); i < len(t.str)-1; {
		var ext string
		i, ext = getExtension(t.str, i)
		e = append(e, Extension{ext})
	}
	return e
}
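
// Usage sketch: reading a BCP 47 extension back out of a tag:
//
//	t := language.Make("de-u-co-phonebk")
//	if ext, ok := t.Extension('u'); ok {
//		fmt.Println(ext)          // u-co-phonebk
//		fmt.Println(ext.Tokens()) // [u co phonebk]
//	}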

// TypeForKey returns the type associated with the given key, where key and type
// are of the allowed values defined for the Unicode locale extension ('u') in
// http://www.unicode.org/reports/tr35/#Unicode_Language_and_Locale_Identifiers.
// TypeForKey will traverse the inheritance chain to get the correct value.
func (t Tag) TypeForKey(key string) string {
	if start, end, _ := t.findTypeForKey(key); end != start {
		return t.str[start:end]
	}
	return ""
}

var (
	errPrivateUse       = errors.New("cannot set a key on a private use tag")
	errInvalidArguments = errors.New("invalid key or type")
)

// SetTypeForKey returns a new Tag with the key set to type, where key and type
// are of the allowed values defined for the Unicode locale extension ('u') in
// http://www.unicode.org/reports/tr35/#Unicode_Language_and_Locale_Identifiers.
// An empty value removes an existing pair with the same key.
func (t Tag) SetTypeForKey(key, value string) (Tag, error) {
	if t.private() {
		return t, errPrivateUse
	}
	if len(key) != 2 {
		return t, errInvalidArguments
	}

	// Remove the setting if value is "".
	if value == "" {
		start, end, _ := t.findTypeForKey(key)
		if start != end {
			// Remove key tag and leading '-'.
			start -= 4

			// Remove a possible empty extension.
			if (end == len(t.str) || t.str[end+2] == '-') && t.str[start-2] == '-' {
				start -= 2
			}
			if start == int(t.pVariant) && end == len(t.str) {
				t.str = ""
				t.pVariant, t.pExt = 0, 0
			} else {
				t.str = fmt.Sprintf("%s%s", t.str[:start], t.str[end:])
			}
		}
		return t, nil
	}

	if len(value) < 3 || len(value) > 8 {
		return t, errInvalidArguments
	}

	var (
		buf    [maxCoreSize + maxSimpleUExtensionSize]byte
		uStart int // start of the -u extension.
	)

	// Generate the tag string if needed.
	if t.str == "" {
		uStart = t.genCoreBytes(buf[:])
		buf[uStart] = '-'
		uStart++
	}

	// Create new key-type pair and parse it to verify.
	b := buf[uStart:]
	copy(b, "u-")
	copy(b[2:], key)
	b[4] = '-'
	b = b[:5+copy(b[5:], value)]
	scan := makeScanner(b)
	if parseExtensions(&scan); scan.err != nil {
		return t, scan.err
	}

	// Assemble the replacement string.
	if t.str == "" {
		t.pVariant, t.pExt = byte(uStart-1), uint16(uStart-1)
		t.str = string(buf[:uStart+len(b)])
	} else {
		s := t.str
		start, end, hasExt := t.findTypeForKey(key)
		if start == end {
			if hasExt {
				b = b[2:]
			}
			t.str = fmt.Sprintf("%s-%s%s", s[:start], b, s[end:])
		} else {
			t.str = fmt.Sprintf("%s%s%s", s[:start], value, s[end:])
		}
	}
	return t, nil
}
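
// Usage sketch: attaching a Unicode locale option via the 'u' extension and
// reading it back:
//
//	t := language.Make("nl")
//	t, _ = t.SetTypeForKey("co", "phonebk")
//	fmt.Println(t)                  // nl-u-co-phonebk
//	fmt.Println(t.TypeForKey("co")) // phonebk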

// findTypeForKey returns the start and end position for the type corresponding
// to key or the point at which to insert the key-value pair if the type
// wasn't found. The hasExt return value reports whether an -u extension was present.
// Note: the extensions are typically very small and are likely to contain
// only one key-type pair.
func (t Tag) findTypeForKey(key string) (start, end int, hasExt bool) {
	p := int(t.pExt)
	if len(key) != 2 || p == len(t.str) || p == 0 {
		return p, p, false
	}
	s := t.str

	// Find the correct extension.
	for p++; s[p] != 'u'; p++ {
		if s[p] > 'u' {
			p--
			return p, p, false
		}
		if p = nextExtension(s, p); p == len(s) {
			return len(s), len(s), false
		}
	}
	// Proceed to the hyphen following the extension name.
	p++

	// curKey is the key currently being processed.
	curKey := ""

	// Iterate over keys until we get the end of a section.
	for {
		// p points to the hyphen preceding the current token.
		if p3 := p + 3; s[p3] == '-' {
			// Found a key.
			// Check whether we just processed the key that was requested.
			if curKey == key {
				return start, p, true
			}
			// Set to the next key and continue scanning type tokens.
			curKey = s[p+1 : p3]
			if curKey > key {
				return p, p, true
			}
			// Start of the type token sequence.
			start = p + 4
			// A type is at least 3 characters long.
			p += 7 // 4 + 3
		} else {
			// Attribute or type, which is at least 3 characters long.
			p += 4
		}
		// p points past the third character of a type or attribute.
		max := p + 5 // maximum length of token plus hyphen.
		if len(s) < max {
			max = len(s)
		}
		for ; p < max && s[p] != '-'; p++ {
		}
		// Bail if we have exhausted all tokens or if the next token starts
		// a new extension.
		if p == len(s) || s[p+2] == '-' {
			if curKey == key {
				return start, p, true
			}
			return p, p, true
		}
	}
}

// CompactIndex returns an index, where 0 <= index < NumCompactTags, for tags
// for which data exists in the text repository. The index will change over time
// and should not be stored in persistent storage. Extensions, except for the
// 'va' type of the 'u' extension, are ignored. It will return 0, false if no
// compact tag exists, where 0 is the index for the root language (Und).
func CompactIndex(t Tag) (index int, ok bool) {
	// TODO: perhaps give more frequent tags a lower index.
	// TODO: we could make the indexes stable. This would exclude some
	// possibilities for optimization, so don't do this quite yet.
	b, s, r := t.Raw()
	if len(t.str) > 0 {
		if strings.HasPrefix(t.str, "x-") {
			// We have no entries for user-defined tags.
			return 0, false
		}
		if uint16(t.pVariant) != t.pExt {
			// There are no tags with variants and a u-va type.
			if t.TypeForKey("va") != "" {
				return 0, false
			}
			t, _ = Raw.Compose(b, s, r, t.Variants())
		} else if _, ok := t.Extension('u'); ok {
			// Strip all but the 'va' entry.
			variant := t.TypeForKey("va")
			t, _ = Raw.Compose(b, s, r)
			t, _ = t.SetTypeForKey("va", variant)
		}
		if len(t.str) > 0 {
			// We have some variants.
			for i, s := range specialTags {
				if s == t {
					return i + 1, true
				}
			}
			return 0, false
		}
	}
	// No variants specified: just compare core components.
	// The key has the form lllssrrr, where l, s, and r are nibbles for
	// respectively the langID, scriptID, and regionID.
	key := uint32(b.langID) << (8 + 12)
	key |= uint32(s.scriptID) << 12
	key |= uint32(r.regionID)
	x, ok := coreTags[key]
	return int(x), ok
}

// Base is an ISO 639 language code, used for encoding the base language
// of a language tag.
type Base struct {
	langID
}

// ParseBase parses a 2- or 3-letter ISO 639 code.
// It returns a ValueError if s is a well-formed but unknown language identifier,
// or another error if s is malformed.
func ParseBase(s string) (Base, error) {
	if n := len(s); n < 2 || 3 < n {
		return Base{}, errSyntax
	}
	var buf [3]byte
	l, err := getLangID(buf[:copy(buf[:], s)])
	return Base{l}, err
}

// Script is a 4-letter ISO 15924 code for representing scripts.
// It is idiomatically represented in title case.
type Script struct {
	scriptID
}

// ParseScript parses a 4-letter ISO 15924 code.
// It returns a ValueError if s is a well-formed but unknown script identifier,
// or another error if s is malformed.
func ParseScript(s string) (Script, error) {
	if len(s) != 4 {
		return Script{}, errSyntax
	}
	var buf [4]byte
	sc, err := getScriptID(script, buf[:copy(buf[:], s)])
	return Script{sc}, err
}

// Region is an ISO 3166-1 or UN M.49 code for representing countries and regions.
type Region struct {
	regionID
}

// EncodeM49 returns the Region for the given UN M.49 code.
// It returns an error if r is not a valid code.
func EncodeM49(r int) (Region, error) {
	rid, err := getRegionM49(r)
	return Region{rid}, err
}

// ParseRegion parses a 2- or 3-letter ISO 3166-1 or a UN M.49 code.
// It returns a ValueError if s is a well-formed but unknown region identifier,
// or another error if s is malformed.
func ParseRegion(s string) (Region, error) {
	if n := len(s); n < 2 || 3 < n {
		return Region{}, errSyntax
	}
	var buf [3]byte
	r, err := getRegionID(buf[:copy(buf[:], s)])
	return Region{r}, err
}
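
// Usage sketch: parsing the individual subtag types; "419" is the UN M.49
// code for Latin America:
//
//	b, _ := language.ParseBase("nl")     // ISO 639 base language
//	s, _ := language.ParseScript("Latn") // ISO 15924 script
//	r, _ := language.ParseRegion("419")  // UN M.49 region
//	fmt.Println(b, s, r)                 // nl Latn 419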

// IsCountry returns whether this region is a country or autonomous area. This
// includes non-standard definitions from CLDR.
func (r Region) IsCountry() bool {
	if r.regionID == 0 || r.IsGroup() || r.IsPrivateUse() && r.regionID != _XK {
		return false
	}
	return true
}

// IsGroup returns whether this region defines a collection of regions. This
// includes non-standard definitions from CLDR.
func (r Region) IsGroup() bool {
	if r.regionID == 0 {
		return false
	}
	return int(regionInclusion[r.regionID]) < len(regionContainment)
}

// Contains returns whether Region c is contained by Region r. It returns true
// if c == r.
func (r Region) Contains(c Region) bool {
	return r.regionID.contains(c.regionID)
}

func (r regionID) contains(c regionID) bool {
	if r == c {
		return true
	}
	g := regionInclusion[r]
	if g >= nRegionGroups {
		return false
	}
	m := regionContainment[g]

	d := regionInclusion[c]
	b := regionInclusionBits[d]

	// A contained country may belong to multiple disjoint groups. Matching any
	// of these indicates containment. If the contained region is a group, it
	// must strictly be a subset.
	if d >= nRegionGroups {
		return b&m != 0
	}
	return b&^m == 0
}
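
// Usage sketch: containment checks between a UN M.49 grouping and a country;
// 150 is the M.49 code for Europe:
//
//	europe, _ := language.EncodeM49(150)
//	nl, _ := language.ParseRegion("NL")
//	fmt.Println(europe.Contains(nl)) // true
//	fmt.Println(nl.Contains(europe)) // false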

var errNoTLD = errors.New("language: region is not a valid ccTLD")

// TLD returns the country code top-level domain (ccTLD). UK is returned for GB.
// In all other cases it returns either the region itself or an error.
//
// This method may return an error for a region for which there exists a
// canonical form with a ccTLD. To get that ccTLD canonicalize r first. The
// region will already be canonicalized if it was obtained from a Tag that was
// obtained using any of the default methods.
func (r Region) TLD() (Region, error) {
	// See http://en.wikipedia.org/wiki/Country_code_top-level_domain for the
	// difference between ISO 3166-1 and IANA ccTLD.
	if r.regionID == _GB {
		r = Region{_UK}
	}
	if (r.typ() & ccTLD) == 0 {
		return Region{}, errNoTLD
	}
	return r, nil
}

// Canonicalize returns the region or a possible replacement if the region is
// deprecated. It will not return a replacement for deprecated regions that
// are split into multiple regions.
func (r Region) Canonicalize() Region {
	if cr := normRegion(r.regionID); cr != 0 {
		return Region{cr}
	}
	return r
}

// Variant represents a registered variant of a language as defined by BCP 47.
type Variant struct {
	variant string
}

// ParseVariant parses and returns a Variant. An error is returned if s is not
// a valid variant.
func ParseVariant(s string) (Variant, error) {
	s = strings.ToLower(s)
	if _, ok := variantIndex[s]; ok {
		return Variant{s}, nil
	}
	return Variant{}, mkErrInvalid([]byte(s))
}

// String returns the string representation of the variant.
func (v Variant) String() string {
	return v.variant
}

396
vendor/golang.org/x/text/language/lookup.go
generated
vendored
@ -1,396 +0,0 @@

// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package language

import (
	"bytes"
	"fmt"
	"sort"
	"strconv"

	"golang.org/x/text/internal/tag"
)

// findIndex tries to find the given tag in idx and returns a standardized error
// if it could not be found.
func findIndex(idx tag.Index, key []byte, form string) (index int, err error) {
	if !tag.FixCase(form, key) {
		return 0, errSyntax
	}
	i := idx.Index(key)
	if i == -1 {
		return 0, mkErrInvalid(key)
	}
	return i, nil
}

func searchUint(imap []uint16, key uint16) int {
	return sort.Search(len(imap), func(i int) bool {
		return imap[i] >= key
	})
}

type langID uint16

// getLangID returns the langID of s if s is a canonical subtag
// or langUnknown if s is not a canonical subtag.
func getLangID(s []byte) (langID, error) {
	if len(s) == 2 {
		return getLangISO2(s)
	}
	return getLangISO3(s)
}

// normLang returns the mapped langID of id according to the language alias
// table, along with the type of the alias.
func normLang(id langID) (langID, langAliasType) {
	k := sort.Search(len(langAliasMap), func(i int) bool {
		return langAliasMap[i].from >= uint16(id)
	})
	if k < len(langAliasMap) && langAliasMap[k].from == uint16(id) {
		return langID(langAliasMap[k].to), langAliasTypes[k]
	}
	return id, langAliasTypeUnknown
}

// getLangISO2 returns the langID for the given 2-letter ISO language code
// or unknownLang if this does not exist.
func getLangISO2(s []byte) (langID, error) {
	if !tag.FixCase("zz", s) {
		return 0, errSyntax
	}
	if i := lang.Index(s); i != -1 && lang.Elem(i)[3] != 0 {
		return langID(i), nil
	}
	return 0, mkErrInvalid(s)
}

const base = 'z' - 'a' + 1

func strToInt(s []byte) uint {
	v := uint(0)
	for i := 0; i < len(s); i++ {
		v *= base
		v += uint(s[i] - 'a')
	}
	return v
}

// intToStr converts the given integer to the original ASCII string passed to
// strToInt. len(s) must match the number of characters obtained.
func intToStr(v uint, s []byte) {
	for i := len(s) - 1; i >= 0; i-- {
		s[i] = byte(v%base) + 'a'
		v /= base
	}
}
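
// Worked example of the base-26 packing implemented by strToInt/intToStr
// above (from inside this package):
//
//	n := strToInt([]byte("cmn")) // ('c'-'a')=2, ('m'-'a')=12, ('n'-'a')=13
//	fmt.Println(n)               // 2*26*26 + 12*26 + 13 = 1677
//	buf := make([]byte, 3)
//	intToStr(n, buf)
//	fmt.Println(string(buf))     // cmn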

// getLangISO3 returns the langID for the given 3-letter ISO language code
// or unknownLang if this does not exist.
func getLangISO3(s []byte) (langID, error) {
	if tag.FixCase("und", s) {
		// first try to match canonical 3-letter entries
		for i := lang.Index(s[:2]); i != -1; i = lang.Next(s[:2], i) {
			if e := lang.Elem(i); e[3] == 0 && e[2] == s[2] {
				// We treat "und" as special and always translate it to "unspecified".
				// Note that ZZ and Zzzz are private use and are not treated as
				// unspecified by default.
				id := langID(i)
				if id == nonCanonicalUnd {
					return 0, nil
				}
				return id, nil
			}
		}
		if i := altLangISO3.Index(s); i != -1 {
			return langID(altLangIndex[altLangISO3.Elem(i)[3]]), nil
		}
		n := strToInt(s)
		if langNoIndex[n/8]&(1<<(n%8)) != 0 {
			return langID(n) + langNoIndexOffset, nil
		}
		// Check for non-canonical uses of ISO3.
		for i := lang.Index(s[:1]); i != -1; i = lang.Next(s[:1], i) {
			if e := lang.Elem(i); e[2] == s[1] && e[3] == s[2] {
				return langID(i), nil
			}
		}
		return 0, mkErrInvalid(s)
	}
	return 0, errSyntax
}

// stringToBuf writes the string to b and returns the number of bytes
// written. cap(b) must be >= 3.
func (id langID) stringToBuf(b []byte) int {
	if id >= langNoIndexOffset {
		intToStr(uint(id)-langNoIndexOffset, b[:3])
		return 3
	} else if id == 0 {
		return copy(b, "und")
	}
	l := lang[id<<2:]
	if l[3] == 0 {
		return copy(b, l[:3])
	}
	return copy(b, l[:2])
}

// String returns the BCP 47 representation of the langID.
// Use b as variable name, instead of id, to ensure the variable
// used is consistent with that of Base in which this type is embedded.
func (b langID) String() string {
	if b == 0 {
		return "und"
	} else if b >= langNoIndexOffset {
		b -= langNoIndexOffset
		buf := [3]byte{}
		intToStr(uint(b), buf[:])
		return string(buf[:])
	}
	l := lang.Elem(int(b))
	if l[3] == 0 {
		return l[:3]
	}
	return l[:2]
}

// ISO3 returns the ISO 639-3 language code.
func (b langID) ISO3() string {
	if b == 0 || b >= langNoIndexOffset {
		return b.String()
	}
	l := lang.Elem(int(b))
	if l[3] == 0 {
		return l[:3]
	} else if l[2] == 0 {
		return altLangISO3.Elem(int(l[3]))[:3]
	}
	// This allocation will only happen for 3-letter ISO codes
	// that are non-canonical BCP 47 language identifiers.
	return l[0:1] + l[2:4]
}

// IsPrivateUse reports whether this language code is reserved for private use.
func (b langID) IsPrivateUse() bool {
	return langPrivateStart <= b && b <= langPrivateEnd
}

type regionID uint16

// getRegionID returns the region id for s if s is a valid 2- or 3-letter
// region code or a 3-digit UN M.49 code.
func getRegionID(s []byte) (regionID, error) {
	if len(s) == 3 {
		if isAlpha(s[0]) {
			return getRegionISO3(s)
		}
		if i, err := strconv.ParseUint(string(s), 10, 10); err == nil {
			return getRegionM49(int(i))
		}
	}
	return getRegionISO2(s)
}

// getRegionISO2 returns the regionID for the given 2-letter ISO country code
// or unknownRegion if this does not exist.
func getRegionISO2(s []byte) (regionID, error) {
	i, err := findIndex(regionISO, s, "ZZ")
	if err != nil {
		return 0, err
	}
	return regionID(i) + isoRegionOffset, nil
}

// getRegionISO3 returns the regionID for the given 3-letter ISO country code
// or unknownRegion if this does not exist.
func getRegionISO3(s []byte) (regionID, error) {
	if tag.FixCase("ZZZ", s) {
		for i := regionISO.Index(s[:1]); i != -1; i = regionISO.Next(s[:1], i) {
			if e := regionISO.Elem(i); e[2] == s[1] && e[3] == s[2] {
				return regionID(i) + isoRegionOffset, nil
			}
		}
		for i := 0; i < len(altRegionISO3); i += 3 {
			if tag.Compare(altRegionISO3[i:i+3], s) == 0 {
				return regionID(altRegionIDs[i/3]), nil
			}
		}
		return 0, mkErrInvalid(s)
	}
	return 0, errSyntax
}

func getRegionM49(n int) (regionID, error) {
	if 0 < n && n <= 999 {
		const (
			searchBits = 7
			regionBits = 9
			regionMask = 1<<regionBits - 1
		)
		idx := n >> searchBits
		buf := fromM49[m49Index[idx]:m49Index[idx+1]]
		val := uint16(n) << regionBits // we rely on bits shifting out
		i := sort.Search(len(buf), func(i int) bool {
			return buf[i] >= val
		})
		if r := fromM49[int(m49Index[idx])+i]; r&^regionMask == val {
			return regionID(r & regionMask), nil
		}
	}
	var e ValueError
	fmt.Fprint(bytes.NewBuffer([]byte(e.v[:])), n)
	return 0, e
}

// normRegion returns a region if r is deprecated or 0 otherwise.
// TODO: consider supporting BYS (-> BLR), CSK (-> 200 or CZ), PHI (-> PHL) and AFI (-> DJ).
// TODO: consider mapping split up regions to new most populous one (like CLDR).
func normRegion(r regionID) regionID {
	m := regionOldMap
	k := sort.Search(len(m), func(i int) bool {
		return m[i].from >= uint16(r)
	})
	if k < len(m) && m[k].from == uint16(r) {
		return regionID(m[k].to)
	}
	return 0
}

const (
	iso3166UserAssigned = 1 << iota
	ccTLD
	bcp47Region
)

func (r regionID) typ() byte {
	return regionTypes[r]
}

// String returns the BCP 47 representation for the region.
// It returns "ZZ" for an unspecified region.
func (r regionID) String() string {
	if r < isoRegionOffset {
		if r == 0 {
			return "ZZ"
		}
		return fmt.Sprintf("%03d", r.M49())
	}
	r -= isoRegionOffset
	return regionISO.Elem(int(r))[:2]
}

// ISO3 returns the 3-letter ISO code of r.
// Note that not all regions have a 3-letter ISO code.
// In such cases this method returns "ZZZ".
func (r regionID) ISO3() string {
	if r < isoRegionOffset {
		return "ZZZ"
	}
	r -= isoRegionOffset
	reg := regionISO.Elem(int(r))
	switch reg[2] {
	case 0:
		return altRegionISO3[reg[3]:][:3]
	case ' ':
		return "ZZZ"
	}
	return reg[0:1] + reg[2:4]
}

// M49 returns the UN M.49 encoding of r, or 0 if this encoding
// is not defined for r.
func (r regionID) M49() int {
	return int(m49[r])
}

// IsPrivateUse reports whether r has the ISO 3166 User-assigned status. This
// may include private-use tags that are assigned by CLDR and used in this
// implementation. So IsPrivateUse and IsCountry can be simultaneously true.
func (r regionID) IsPrivateUse() bool {
	return r.typ()&iso3166UserAssigned != 0
}

type scriptID uint8

// getScriptID returns the script id for string s. It assumes that s
// is of the format [A-Z][a-z]{3}.
func getScriptID(idx tag.Index, s []byte) (scriptID, error) {
	i, err := findIndex(idx, s, "Zzzz")
	return scriptID(i), err
}

// String returns the script code in title case.
// It returns "Zzzz" for an unspecified script.
func (s scriptID) String() string {
	if s == 0 {
		return "Zzzz"
	}
	return script.Elem(int(s))
}

// IsPrivateUse reports whether this script code is reserved for private use.
func (s scriptID) IsPrivateUse() bool {
	return _Qaaa <= s && s <= _Qabx
}

const (
	maxAltTaglen = len("en-US-POSIX")
	maxLen       = maxAltTaglen
)

var (
	// grandfatheredMap holds a mapping from legacy and grandfathered tags to
	// their base language or index to more elaborate tag.
	grandfatheredMap = map[[maxLen]byte]int16{
		[maxLen]byte{'a', 'r', 't', '-', 'l', 'o', 'j', 'b', 'a', 'n'}: _jbo, // art-lojban
		[maxLen]byte{'i', '-', 'a', 'm', 'i'}:                          _ami, // i-ami
		[maxLen]byte{'i', '-', 'b', 'n', 'n'}:                          _bnn, // i-bnn
		[maxLen]byte{'i', '-', 'h', 'a', 'k'}:                          _hak, // i-hak
		[maxLen]byte{'i', '-', 'k', 'l', 'i', 'n', 'g', 'o', 'n'}:      _tlh, // i-klingon
		[maxLen]byte{'i', '-', 'l', 'u', 'x'}:                          _lb,  // i-lux
		[maxLen]byte{'i', '-', 'n', 'a', 'v', 'a', 'j', 'o'}:           _nv,  // i-navajo
		[maxLen]byte{'i', '-', 'p', 'w', 'n'}:                          _pwn, // i-pwn
		[maxLen]byte{'i', '-', 't', 'a', 'o'}:                          _tao, // i-tao
		[maxLen]byte{'i', '-', 't', 'a', 'y'}:                          _tay, // i-tay
		[maxLen]byte{'i', '-', 't', 's', 'u'}:                          _tsu, // i-tsu
		[maxLen]byte{'n', 'o', '-', 'b', 'o', 'k'}:                     _nb,  // no-bok
		[maxLen]byte{'n', 'o', '-', 'n', 'y', 'n'}:                     _nn,  // no-nyn
		[maxLen]byte{'s', 'g', 'n', '-', 'b', 'e', '-', 'f', 'r'}:      _sfb, // sgn-BE-FR
		[maxLen]byte{'s', 'g', 'n', '-', 'b', 'e', '-', 'n', 'l'}:      _vgt, // sgn-BE-NL
		[maxLen]byte{'s', 'g', 'n', '-', 'c', 'h', '-', 'd', 'e'}:      _sgg, // sgn-CH-DE
		[maxLen]byte{'z', 'h', '-', 'g', 'u', 'o', 'y', 'u'}:           _cmn, // zh-guoyu
		[maxLen]byte{'z', 'h', '-', 'h', 'a', 'k', 'k', 'a'}:           _hak, // zh-hakka
		[maxLen]byte{'z', 'h', '-', 'm', 'i', 'n', '-', 'n', 'a', 'n'}: _nan, // zh-min-nan
		[maxLen]byte{'z', 'h', '-', 'x', 'i', 'a', 'n', 'g'}:           _hsn, // zh-xiang

		// Grandfathered tags with no modern replacement will be converted as
		// follows:
		[maxLen]byte{'c', 'e', 'l', '-', 'g', 'a', 'u', 'l', 'i', 's', 'h'}: -1, // cel-gaulish
		[maxLen]byte{'e', 'n', '-', 'g', 'b', '-', 'o', 'e', 'd'}:           -2, // en-GB-oed
		[maxLen]byte{'i', '-', 'd', 'e', 'f', 'a', 'u', 'l', 't'}:           -3, // i-default
		[maxLen]byte{'i', '-', 'e', 'n', 'o', 'c', 'h', 'i', 'a', 'n'}:      -4, // i-enochian
		[maxLen]byte{'i', '-', 'm', 'i', 'n', 'g', 'o'}:                     -5, // i-mingo
		[maxLen]byte{'z', 'h', '-', 'm', 'i', 'n'}:                          -6, // zh-min

		// CLDR-specific tag.
		[maxLen]byte{'r', 'o', 'o', 't'}:                                    0,  // root
		[maxLen]byte{'e', 'n', '-', 'u', 's', '-', 'p', 'o', 's', 'i', 'x'}: -7, // en_US_POSIX
	}

	altTagIndex = [...]uint8{0, 17, 31, 45, 61, 74, 86, 102}

	altTags = "xtg-x-cel-gaulishen-GB-oxendicten-x-i-defaultund-x-i-enochiansee-x-i-mingonan-x-zh-minen-US-u-va-posix"
)

func grandfathered(s [maxAltTaglen]byte) (t Tag, ok bool) {
	if v, ok := grandfatheredMap[s]; ok {
		if v < 0 {
			return Make(altTags[altTagIndex[-v-1]:altTagIndex[-v]]), true
		}
		t.lang = langID(v)
		return t, true
	}
	return t, false
}

841
vendor/golang.org/x/text/language/match.go
generated
vendored
@ -1,841 +0,0 @@

// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package language

import "errors"

// Matcher is the interface that wraps the Match method.
//
// Match returns the best match for any of the given tags, along with
// a unique index associated with the returned tag and a confidence
// score.
type Matcher interface {
	Match(t ...Tag) (tag Tag, index int, c Confidence)
}

// Comprehends reports the confidence score for a speaker of a given language
// to be able to comprehend the written form of an alternative language.
func Comprehends(speaker, alternative Tag) Confidence {
	_, _, c := NewMatcher([]Tag{alternative}).Match(speaker)
	return c
}

// NewMatcher returns a Matcher that matches an ordered list of preferred tags
// against a list of supported tags based on written intelligibility, closeness
// of dialect, equivalence of subtags and various other rules. It is initialized
// with the list of supported tags. The first element is used as the default
// value in case no match is found.
//
// Its Match method matches the first of the given Tags to reach a certain
// confidence threshold. The tags passed to Match should therefore be specified
// in order of preference. Extensions are ignored for matching.
//
// The index returned by the Match method corresponds to the index of the
// matched tag in t, but is augmented with the Unicode extension ('u') of the
// corresponding preferred tag. This allows user locale options to be passed
// transparently.
func NewMatcher(t []Tag) Matcher {
	return newMatcher(t)
}
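
// Usage sketch: matching user-preferred tags against an application's
// supported locales (the first supported tag is the default). A user
// preferring Croatian will typically match Serbian here with a non-exact
// confidence, per the CLDR language matching data:
//
//	supported := []language.Tag{
//		language.Make("en"), // default
//		language.Make("de"),
//		language.Make("sr"),
//	}
//	m := language.NewMatcher(supported)
//	tag, index, conf := m.Match(language.Make("hr"))
//	fmt.Println(tag, index, conf)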

func (m *matcher) Match(want ...Tag) (t Tag, index int, c Confidence) {
	match, w, c := m.getBest(want...)
	if match == nil {
		t = m.default_.tag
	} else {
		t, index = match.tag, match.index
	}
	// Copy options from the user-provided tag into the result tag. This is hard
	// to do after the fact, so we do it here.
	// TODO: consider also adding in variants that are compatible with the
	// matched language.
	// TODO: Add back region if it is non-ambiguous? Or create another tag to
	// preserve the region?
	if u, ok := w.Extension('u'); ok {
		t, _ = Raw.Compose(t, u)
	}
	return t, index, c
}

type scriptRegionFlags uint8

const (
	isList = 1 << iota
	scriptInFrom
	regionInFrom
)

func (t *Tag) setUndefinedLang(id langID) {
	if t.lang == 0 {
		t.lang = id
	}
}

func (t *Tag) setUndefinedScript(id scriptID) {
	if t.script == 0 {
		t.script = id
	}
}

func (t *Tag) setUndefinedRegion(id regionID) {
	if t.region == 0 || t.region.contains(id) {
		t.region = id
	}
}

// ErrMissingLikelyTagsData indicates no information was available
// to compute likely values of missing tags.
var ErrMissingLikelyTagsData = errors.New("missing likely tags data")

// addLikelySubtags sets subtags to their most likely value, given the locale.
// In most cases this means setting fields for unknown values, but in some
// cases it may alter a value. It returns an ErrMissingLikelyTagsData error
// if the given locale cannot be expanded.
func (t Tag) addLikelySubtags() (Tag, error) {
	id, err := addTags(t)
	if err != nil {
		return t, err
	} else if id.equalTags(t) {
		return t, nil
	}
	id.remakeString()
	return id, nil
}
// specializeRegion attempts to specialize a group region.
|
|
||||||
func specializeRegion(t *Tag) bool {
|
|
||||||
if i := regionInclusion[t.region]; i < nRegionGroups {
|
|
||||||
x := likelyRegionGroup[i]
|
|
||||||
if langID(x.lang) == t.lang && scriptID(x.script) == t.script {
|
|
||||||
t.region = regionID(x.region)
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func addTags(t Tag) (Tag, error) {
|
|
||||||
// We leave private use identifiers alone.
|
|
||||||
if t.private() {
|
|
||||||
return t, nil
|
|
||||||
}
|
|
||||||
if t.script != 0 && t.region != 0 {
|
|
||||||
if t.lang != 0 {
|
|
||||||
// already fully specified
|
|
||||||
specializeRegion(&t)
|
|
||||||
return t, nil
|
|
||||||
}
|
|
||||||
// Search matches for und-script-region. Note that for these cases
|
|
||||||
// region will never be a group so there is no need to check for this.
|
|
||||||
list := likelyRegion[t.region : t.region+1]
|
|
||||||
if x := list[0]; x.flags&isList != 0 {
|
|
||||||
list = likelyRegionList[x.lang : x.lang+uint16(x.script)]
|
|
||||||
}
|
|
||||||
for _, x := range list {
|
|
||||||
// Deviating from the spec. See match_test.go for details.
|
|
||||||
if scriptID(x.script) == t.script {
|
|
||||||
t.setUndefinedLang(langID(x.lang))
|
|
||||||
return t, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if t.lang != 0 {
|
|
||||||
// Search matches for lang-script and lang-region, where lang != und.
|
|
||||||
if t.lang < langNoIndexOffset {
|
|
||||||
x := likelyLang[t.lang]
|
|
||||||
if x.flags&isList != 0 {
|
|
||||||
list := likelyLangList[x.region : x.region+uint16(x.script)]
|
|
||||||
if t.script != 0 {
|
|
||||||
for _, x := range list {
|
|
||||||
if scriptID(x.script) == t.script && x.flags&scriptInFrom != 0 {
|
|
||||||
t.setUndefinedRegion(regionID(x.region))
|
|
||||||
return t, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else if t.region != 0 {
|
|
||||||
count := 0
|
|
||||||
goodScript := true
|
|
||||||
tt := t
|
|
||||||
for _, x := range list {
|
|
||||||
// We visit all entries for which the script was not
|
|
||||||
// defined, including the ones where the region was not
|
|
||||||
// defined. This allows for proper disambiguation within
|
|
||||||
// regions.
|
|
||||||
if x.flags&scriptInFrom == 0 && t.region.contains(regionID(x.region)) {
|
|
||||||
tt.region = regionID(x.region)
|
|
||||||
tt.setUndefinedScript(scriptID(x.script))
|
|
||||||
goodScript = goodScript && tt.script == scriptID(x.script)
|
|
||||||
count++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if count == 1 {
|
|
||||||
return tt, nil
|
|
||||||
}
|
|
||||||
// Even if we fail to find a unique Region, we might have
|
|
||||||
// an unambiguous script.
|
|
||||||
if goodScript {
|
|
||||||
t.script = tt.script
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// Search matches for und-script.
|
|
||||||
if t.script != 0 {
|
|
||||||
x := likelyScript[t.script]
|
|
||||||
if x.region != 0 {
|
|
||||||
t.setUndefinedRegion(regionID(x.region))
|
|
||||||
t.setUndefinedLang(langID(x.lang))
|
|
||||||
return t, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// Search matches for und-region. If und-script-region exists, it would
|
|
||||||
// have been found earlier.
|
|
||||||
if t.region != 0 {
|
|
||||||
if i := regionInclusion[t.region]; i < nRegionGroups {
|
|
||||||
x := likelyRegionGroup[i]
|
|
||||||
if x.region != 0 {
|
|
||||||
t.setUndefinedLang(langID(x.lang))
|
|
||||||
t.setUndefinedScript(scriptID(x.script))
|
|
||||||
t.region = regionID(x.region)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
x := likelyRegion[t.region]
|
|
||||||
if x.flags&isList != 0 {
|
|
||||||
x = likelyRegionList[x.lang]
|
|
||||||
}
|
|
||||||
if x.script != 0 && x.flags != scriptInFrom {
|
|
||||||
t.setUndefinedLang(langID(x.lang))
|
|
||||||
t.setUndefinedScript(scriptID(x.script))
|
|
||||||
return t, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Search matches for lang.
|
|
||||||
if t.lang < langNoIndexOffset {
|
|
||||||
x := likelyLang[t.lang]
|
|
||||||
if x.flags&isList != 0 {
|
|
||||||
x = likelyLangList[x.region]
|
|
||||||
}
|
|
||||||
if x.region != 0 {
|
|
||||||
t.setUndefinedScript(scriptID(x.script))
|
|
||||||
t.setUndefinedRegion(regionID(x.region))
|
|
||||||
}
|
|
||||||
specializeRegion(&t)
|
|
||||||
if t.lang == 0 {
|
|
||||||
t.lang = _en // default language
|
|
||||||
}
|
|
||||||
return t, nil
|
|
||||||
}
|
|
||||||
return t, ErrMissingLikelyTagsData
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *Tag) setTagsFrom(id Tag) {
|
|
||||||
t.lang = id.lang
|
|
||||||
t.script = id.script
|
|
||||||
t.region = id.region
|
|
||||||
}
|
|
||||||
|
|
||||||
// minimize removes the region or script subtags from t such that
|
|
||||||
// t.addLikelySubtags() == t.minimize().addLikelySubtags().
|
|
||||||
func (t Tag) minimize() (Tag, error) {
|
|
||||||
t, err := minimizeTags(t)
|
|
||||||
if err != nil {
|
|
||||||
return t, err
|
|
||||||
}
|
|
||||||
t.remakeString()
|
|
||||||
return t, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// minimizeTags mimics the behavior of the ICU 51 C implementation.
|
|
||||||
func minimizeTags(t Tag) (Tag, error) {
|
|
||||||
if t.equalTags(und) {
|
|
||||||
return t, nil
|
|
||||||
}
|
|
||||||
max, err := addTags(t)
|
|
||||||
if err != nil {
|
|
||||||
return t, err
|
|
||||||
}
|
|
||||||
for _, id := range [...]Tag{
|
|
||||||
{lang: t.lang},
|
|
||||||
{lang: t.lang, region: t.region},
|
|
||||||
{lang: t.lang, script: t.script},
|
|
||||||
} {
|
|
||||||
if x, err := addTags(id); err == nil && max.equalTags(x) {
|
|
||||||
t.setTagsFrom(id)
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return t, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Tag Matching
// CLDR defines an algorithm for finding the best match between two sets of language
// tags. The basic algorithm defines how to score a possible match and then find
// the match with the best score
// (see http://www.unicode.org/reports/tr35/#LanguageMatching).
// Using scoring has several disadvantages. The scoring obfuscates the importance of
// the various factors considered, making the algorithm harder to understand. Using
// scoring also requires the full score to be computed for each pair of tags.
//
// We will use a different algorithm which aims to have the following properties:
//   - clarity on the precedence of the various selection factors, and
//   - improved performance by allowing early termination of a comparison.
//
// Matching algorithm (overview)
// Input:
//   - supported: a set of supported tags
//   - default:   the default tag to return in case there is no match
//   - desired:   list of desired tags, ordered by preference, starting with
//                the most-preferred.
//
// Algorithm:
//   1) Set the best match to the lowest confidence level.
//   2) For each tag in "desired":
//      a) For each tag in "supported":
//         1) compute the match between the two tags.
//         2) if the match is better than the previous best match, replace it
//            with the new match. (see next section)
//      b) if the current best match is above a certain threshold, return this
//         match without proceeding to the next tag in "desired". [See Note 1]
//   3) If the best match so far is below a certain threshold, return "default".
//
// Ranking:
// We use two phases to determine whether one pair of tags are a better match
// than another pair of tags. First, we determine a rough confidence level. If the
// levels are different, the one with the highest confidence wins.
// Second, if the rough confidence levels are identical, we use a set of tie-breaker
// rules.
//
// The confidence level of matching a pair of tags is determined by finding the
// lowest confidence level of any matches of the corresponding subtags (the
// result is deemed as good as its weakest link).
// We define the following levels:
//   Exact    - An exact match of a subtag, before adding likely subtags.
//   MaxExact - An exact match of a subtag, after adding likely subtags.
//              [See Note 2].
//   High     - High level of mutual intelligibility between different subtag
//              variants.
//   Low      - Low level of mutual intelligibility between different subtag
//              variants.
//   No       - No mutual intelligibility.
//
// The following levels can occur for each type of subtag:
//   Base:    Exact, MaxExact, High, Low, No
//   Script:  Exact, MaxExact [see Note 3], Low, No
//   Region:  Exact, MaxExact, High
//   Variant: Exact, High
//   Private: Exact, No
//
// Any result with a confidence level of Low or higher is deemed a possible match.
// Once a desired tag matches any of the supported tags with a level of MaxExact
// or higher, the next desired tag is not considered (see Step 2.b).
// Note that CLDR provides languageMatching data that defines close equivalence
// classes for base languages, scripts and regions.
//
// Tie-breaking
// If we get the same confidence level for two matches, we apply a sequence of
// tie-breaking rules. The first that succeeds defines the result. The rules are
// applied in the following order.
//   1) Original language was defined and was identical.
//   2) Original region was defined and was identical.
//   3) Distance between two maximized regions was the smallest.
//   4) Original script was defined and was identical.
//   5) Distance from want tag to have tag using the parent relation. [See Note 5]
// If there is still no winner after these rules are applied, the first match
// found wins.
//
// Notes:
// [1] Note that even if we may not have a perfect match, if a match is above a
//     certain threshold, it is considered a better match than any other match
//     to a tag later in the list of preferred language tags.
// [2] In practice, as matching of Exact is done in a separate phase from
//     matching the other levels, we reuse the Exact level to mean MaxExact in
//     the second phase. As a consequence, we only need the levels defined by
//     the Confidence type. The MaxExact confidence level is mapped to High in
//     the public API.
// [3] We do not differentiate between maximized script values that were derived
//     from suppressScript versus most likely tag data. We determined that in
//     ranking the two, one ranks just after the other. Moreover, the two cannot
//     occur concurrently. As a consequence, they are identical for practical
//     purposes.
// [4] In case of deprecated, macro-equivalents and legacy mappings, we assign
//     the MaxExact level to allow iw vs he to still be a closer match than
//     en-AU vs en-US, for example.
// [5] In CLDR a locale inherits fields that are unspecified for this locale
//     from its parent. Therefore, if a locale is a parent of another locale,
//     it is a strong measure for closeness, especially when no other tie
//     breaker rule applies. One could also argue it is inconsistent, for
//     example, when pt-AO matches pt (which CLDR equates with pt-BR), even
//     though its parent is pt-PT according to the inheritance rules.
//
// Implementation Details:
// There are several performance considerations worth pointing out. Most notably,
// we preprocess as much as possible (within reason) at the time of creation of a
// matcher. This includes:
//   - creating a per-language map, which includes data for the raw base language
//     and its canonicalized variant (if applicable),
//   - expanding entries for the equivalence classes defined in CLDR's
//     languageMatch data.
// The per-language map ensures that typically only a very small number of tags
// need to be considered. The pre-expansion of canonicalized subtags and
// equivalence classes reduces the amount of map lookups that need to be done at
// runtime.

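// Editor's sketch (not part of the original file): the algorithm above backs
// the package's public Matcher. A minimal, hedged usage example; the supported
// set, desired tags, and expected result are illustrative assumptions.
//
//	package main
//
//	import (
//		"fmt"
//
//		"golang.org/x/text/language"
//	)
//
//	func main() {
//		// Order matters: the first supported tag is the default.
//		m := language.NewMatcher([]language.Tag{
//			language.AmericanEnglish,   // en-US
//			language.German,            // de
//			language.SimplifiedChinese, // zh-Hans
//		})
//		// Desired tags are ordered by preference, most preferred first.
//		tag, index, conf := m.Match(language.Make("zh-CN"), language.English)
//		fmt.Println(tag, index, conf) // expect a zh-Hans match at index 2
//	}
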
// matcher keeps a set of supported language tags, indexed by language.
type matcher struct {
	default_     *haveTag
	index        map[langID]*matchHeader
	passSettings bool
}

// matchHeader has the lists of tags for exact matches and matches based on
// maximized and canonicalized tags for a given language.
type matchHeader struct {
	exact []*haveTag
	max   []*haveTag
}

// haveTag holds a supported Tag and its maximized script and region. The maximized
// or canonicalized language is not stored as it is not needed during matching.
type haveTag struct {
	tag Tag

	// index of this tag in the original list of supported tags.
	index int

	// conf is the maximum confidence that can result from matching this haveTag.
	// When conf < Exact this means it was inserted after applying a CLDR equivalence rule.
	conf Confidence

	// Maximized region and script.
	maxRegion regionID
	maxScript scriptID

	// altScript may be checked as an alternative match to maxScript. If altScript
	// matches, the confidence level for this match is Low. Theoretically there
	// could be multiple alternative scripts. This does not occur in practice.
	altScript scriptID

	// nextMax is the index of the next haveTag with the same maximized tags.
	nextMax uint16
}

func makeHaveTag(tag Tag, index int) (haveTag, langID) {
	max := tag
	if tag.lang != 0 {
		max, _ = max.canonicalize(All)
		max, _ = addTags(max)
		max.remakeString()
	}
	return haveTag{tag, index, Exact, max.region, max.script, altScript(max.lang, max.script), 0}, max.lang
}

// altScript returns an alternative script that may match the given script with
// a low confidence. At the moment, the langMatch data allows for at most one
// script to map to another and we rely on this to keep the code simple.
func altScript(l langID, s scriptID) scriptID {
	for _, alt := range matchScript {
		if (alt.lang == 0 || langID(alt.lang) == l) && scriptID(alt.have) == s {
			return scriptID(alt.want)
		}
	}
	return 0
}

// addIfNew adds a haveTag to the list of tags only if it is a unique tag.
// Tags that have the same maximized values are linked by index.
func (h *matchHeader) addIfNew(n haveTag, exact bool) {
	// Don't add new exact matches.
	for _, v := range h.exact {
		if v.tag.equalsRest(n.tag) {
			return
		}
	}
	if exact {
		h.exact = append(h.exact, &n)
	}
	// Allow duplicate maximized tags, but create a linked list to allow quickly
	// comparing the equivalents and bail out.
	for i, v := range h.max {
		if v.maxScript == n.maxScript &&
			v.maxRegion == n.maxRegion &&
			v.tag.variantOrPrivateTagStr() == n.tag.variantOrPrivateTagStr() {
			for h.max[i].nextMax != 0 {
				i = int(h.max[i].nextMax)
			}
			h.max[i].nextMax = uint16(len(h.max))
			break
		}
	}
	h.max = append(h.max, &n)
}

// header returns the matchHeader for the given language. It creates one if
// it doesn't already exist.
func (m *matcher) header(l langID) *matchHeader {
	if h := m.index[l]; h != nil {
		return h
	}
	h := &matchHeader{}
	m.index[l] = h
	return h
}

// newMatcher builds an index for the given supported tags and returns it as
// a matcher. It also expands the index by considering various equivalence classes
// for a given tag.
func newMatcher(supported []Tag) *matcher {
	m := &matcher{
		index: make(map[langID]*matchHeader),
	}
	if len(supported) == 0 {
		m.default_ = &haveTag{}
		return m
	}
	// Add supported languages to the index. Add exact matches first to give
	// them precedence.
	for i, tag := range supported {
		pair, _ := makeHaveTag(tag, i)
		m.header(tag.lang).addIfNew(pair, true)
	}
	m.default_ = m.header(supported[0].lang).exact[0]
	for i, tag := range supported {
		pair, max := makeHaveTag(tag, i)
		if max != tag.lang {
			m.header(max).addIfNew(pair, false)
		}
	}

	// update is used to add indexes in the map for equivalent languages.
	// If force is true, the update will also apply to derived entries. To
	// avoid applying a "transitive closure", use false.
	update := func(want, have uint16, conf Confidence, force bool) {
		if hh := m.index[langID(have)]; hh != nil {
			if !force && len(hh.exact) == 0 {
				return
			}
			hw := m.header(langID(want))
			for _, ht := range hh.max {
				v := *ht
				if conf < v.conf {
					v.conf = conf
				}
				v.nextMax = 0 // this value needs to be recomputed
				if v.altScript != 0 {
					v.altScript = altScript(langID(want), v.maxScript)
				}
				hw.addIfNew(v, conf == Exact && len(hh.exact) > 0)
			}
		}
	}

	// Add entries for languages with mutual intelligibility as defined by CLDR's
	// languageMatch data.
	for _, ml := range matchLang {
		update(ml.want, ml.have, Confidence(ml.conf), false)
		if !ml.oneway {
			update(ml.have, ml.want, Confidence(ml.conf), false)
		}
	}

	// Add entries for possible canonicalizations. This is an optimization to
	// ensure that only one map lookup needs to be done at runtime per desired tag.
	// First we match deprecated equivalents. If they are perfect equivalents
	// (their canonicalization simply substitutes a different language code, but
	// nothing else), the match confidence is Exact, otherwise it is High.
	for i, lm := range langAliasMap {
		if lm.from == _sh {
			continue
		}

		// If deprecated codes match and there is no fiddling with the script
		// or region, we consider it an exact match.
		conf := Exact
		if langAliasTypes[i] != langMacro {
			if !isExactEquivalent(langID(lm.from)) {
				conf = High
			}
			update(lm.to, lm.from, conf, true)
		}
		update(lm.from, lm.to, conf, true)
	}
	return m
}

// getBest gets the best matching tag in m for any of the given tags, taking into
// account the order of preference of the given tags.
func (m *matcher) getBest(want ...Tag) (got *haveTag, orig Tag, c Confidence) {
	best := bestMatch{}
	for _, w := range want {
		var max Tag
		// Check for exact match first.
		h := m.index[w.lang]
		if w.lang != 0 {
			// Base language is defined.
			if h == nil {
				continue
			}
			for i := range h.exact {
				have := h.exact[i]
				if have.tag.equalsRest(w) {
					return have, w, Exact
				}
			}
			max, _ = w.canonicalize(Legacy | Deprecated)
			max, _ = addTags(max)
		} else {
			// Base language is not defined.
			if h != nil {
				for i := range h.exact {
					have := h.exact[i]
					if have.tag.equalsRest(w) {
						return have, w, Exact
					}
				}
			}
			if w.script == 0 && w.region == 0 {
				// We skip all tags matching und for approximate matching, including
				// private tags.
				continue
			}
			max, _ = addTags(w)
			if h = m.index[max.lang]; h == nil {
				continue
			}
		}
		// Check for match based on maximized tag.
		for i := range h.max {
			have := h.max[i]
			best.update(have, w, max.script, max.region)
			if best.conf == Exact {
				for have.nextMax != 0 {
					have = h.max[have.nextMax]
					best.update(have, w, max.script, max.region)
				}
				return best.have, best.want, High
			}
		}
	}
	if best.conf <= No {
		if len(want) != 0 {
			return nil, want[0], No
		}
		return nil, Tag{}, No
	}
	return best.have, best.want, best.conf
}

// bestMatch accumulates the best match so far.
type bestMatch struct {
	have *haveTag
	want Tag
	conf Confidence
	// Cached results from applying tie-breaking rules.
	origLang   bool
	origReg    bool
	regDist    uint8
	origScript bool
	parentDist uint8 // 255 if have is not an ancestor of want tag.
}

// update updates the existing best match if the new pair is considered to be a
// better match.
// To determine if the given pair is a better match, it first computes the rough
// confidence level. If this surpasses the current match, it will replace it and
// update the tie-breaker rule cache. If there is a tie, it proceeds with applying
// a series of tie-breaker rules. If there is no conclusive winner after applying
// the tie-breaker rules, it leaves the current match as the preferred match.
func (m *bestMatch) update(have *haveTag, tag Tag, maxScript scriptID, maxRegion regionID) {
	// Bail if the maximum attainable confidence is below that of the current best match.
	c := have.conf
	if c < m.conf {
		return
	}
	if have.maxScript != maxScript {
		// There is usually very little comprehension between different scripts.
		// In a few cases there may still be Low comprehension. This possibility is
		// pre-computed and stored in have.altScript.
		if Low < m.conf || have.altScript != maxScript {
			return
		}
		c = Low
	} else if have.maxRegion != maxRegion {
		// There is usually a small difference between languages across regions.
		// We use the region distance (below) to disambiguate between equal matches.
		if High < c {
			c = High
		}
	}

	// We store the results of the computations of the tie-breaker rules along
	// with the best match. There is no need to do the checks once we determine
	// we have a winner, but we do still need to do the tie-breaker computations.
	// We use "beaten" to keep track if we still need to do the checks.
	beaten := false // true if the new pair defeats the current one.
	if c != m.conf {
		if c < m.conf {
			return
		}
		beaten = true
	}

	// Tie-breaker rules:
	// We prefer if the pre-maximized language was specified and identical.
	origLang := have.tag.lang == tag.lang && tag.lang != 0
	if !beaten && m.origLang != origLang {
		if m.origLang {
			return
		}
		beaten = true
	}

	// We prefer if the pre-maximized region was specified and identical.
	origReg := have.tag.region == tag.region && tag.region != 0
	if !beaten && m.origReg != origReg {
		if m.origReg {
			return
		}
		beaten = true
	}

	// Next we prefer smaller distances between regions, as defined by regionDist.
	regDist := regionDist(have.maxRegion, maxRegion, tag.lang)
	if !beaten && m.regDist != regDist {
		if regDist > m.regDist {
			return
		}
		beaten = true
	}

	// Next we prefer if the pre-maximized script was specified and identical.
	origScript := have.tag.script == tag.script && tag.script != 0
	if !beaten && m.origScript != origScript {
		if m.origScript {
			return
		}
		beaten = true
	}

	// Finally we prefer tags which have a closer parent relationship.
	parentDist := parentDistance(have.tag.region, tag)
	if !beaten && m.parentDist != parentDist {
		if parentDist > m.parentDist {
			return
		}
		beaten = true
	}

	// Update m to the newly found best match.
	if beaten {
		m.have = have
		m.want = tag
		m.conf = c
		m.origLang = origLang
		m.origReg = origReg
		m.origScript = origScript
		m.regDist = regDist
		m.parentDist = parentDist
	}
}

// parentDistance returns the number of times Parent must be called before the
// regions match. It is assumed that it has already been checked that lang and
// script are identical. If haveRegion does not occur in the ancestor chain of
// tag, it returns 255.
func parentDistance(haveRegion regionID, tag Tag) uint8 {
	p := tag.Parent()
	d := uint8(1)
	for haveRegion != p.region {
		if p.region == 0 {
			return 255
		}
		p = p.Parent()
		d++
	}
	return d
}

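// Editor's note (not part of the original file), illustrative and assuming
// CLDR's inheritance data: for tag "pt-AO" the parent chain is
// pt-AO -> pt-PT -> pt, so parentDistance(_PT, tag) returns 1, while a region
// that never appears in the chain (say _BR) yields 255 once the chain reaches
// a parent with no region.
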
// regionDist wraps regionDistance with some exceptions to the algorithmic distance.
func regionDist(a, b regionID, lang langID) uint8 {
	if lang == _en {
		// Two variants of non-US English are close to each other, regardless of distance.
		if a != _US && b != _US {
			return 2
		}
	}
	return uint8(regionDistance(a, b))
}

// regionDistance computes the distance between two regions based on the
// distance in the graph of region containments as defined in CLDR. It iterates
// over increasingly inclusive sets of groups, represented as bit vectors, until
// the source bit vector has bits in common with the destination vector.
func regionDistance(a, b regionID) int {
	if a == b {
		return 0
	}
	p, q := regionInclusion[a], regionInclusion[b]
	if p < nRegionGroups {
		p, q = q, p
	}
	set := regionInclusionBits
	if q < nRegionGroups && set[p]&(1<<q) != 0 {
		return 1
	}
	d := 2
	for goal := set[q]; set[p]&goal == 0; p = regionInclusionNext[p] {
		d++
	}
	return d
}

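// Editor's note (not part of the original file): two properties follow
// directly from the code above, assuming the CLDR containment tables:
// regionDistance(r, r) == 0, and the distance is 1 when one argument is a
// region group that directly contains the other (e.g. 021 "Northern America"
// and US).
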
func (t Tag) variants() string {
	if t.pVariant == 0 {
		return ""
	}
	return t.str[t.pVariant:t.pExt]
}

// variantOrPrivateTagStr returns variants or private use tags.
func (t Tag) variantOrPrivateTagStr() string {
	if t.pExt > 0 {
		return t.str[t.pVariant:t.pExt]
	}
	return t.str[t.pVariant:]
}

// equalsRest compares everything except the language.
func (a Tag) equalsRest(b Tag) bool {
	// TODO: don't include extensions in this comparison. To do this efficiently,
	// though, we should handle private tags separately.
	return a.script == b.script && a.region == b.region && a.variantOrPrivateTagStr() == b.variantOrPrivateTagStr()
}

// isExactEquivalent returns true if canonicalizing the language will not alter
// the script or region of a tag.
func isExactEquivalent(l langID) bool {
	for _, o := range notEquivalent {
		if o == l {
			return false
		}
	}
	return true
}

var notEquivalent []langID

func init() {
	// Create a list of all languages for which canonicalization may alter the
	// script or region.
	for _, lm := range langAliasMap {
		tag := Tag{lang: langID(lm.from)}
		if tag, _ = tag.canonicalize(All); tag.script != 0 || tag.region != 0 {
			notEquivalent = append(notEquivalent, langID(lm.from))
		}
	}
}

859
vendor/golang.org/x/text/language/parse.go
generated
vendored
@ -1,859 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package language

import (
	"bytes"
	"errors"
	"fmt"
	"sort"
	"strconv"
	"strings"

	"golang.org/x/text/internal/tag"
)

// isAlpha returns true if the byte is not a digit.
// b must be an ASCII letter or digit.
func isAlpha(b byte) bool {
	return b > '9'
}

// isAlphaNum returns true if the string contains only ASCII letters or digits.
func isAlphaNum(s []byte) bool {
	for _, c := range s {
		if !('a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9') {
			return false
		}
	}
	return true
}

// errSyntax is returned by any of the parsing functions when the
// input is not well-formed, according to BCP 47.
// TODO: return the position at which the syntax error occurred?
var errSyntax = errors.New("language: tag is not well-formed")

// ValueError is returned by any of the parsing functions when the
// input is well-formed but the respective subtag is not recognized
// as a valid value.
type ValueError struct {
	v [8]byte
}

func mkErrInvalid(s []byte) error {
	var e ValueError
	copy(e.v[:], s)
	return e
}

func (e ValueError) tag() []byte {
	n := bytes.IndexByte(e.v[:], 0)
	if n == -1 {
		n = 8
	}
	return e.v[:n]
}

// Error implements the error interface.
func (e ValueError) Error() string {
	return fmt.Sprintf("language: subtag %q is well-formed but unknown", e.tag())
}

// Subtag returns the subtag for which the error occurred.
func (e ValueError) Subtag() string {
	return string(e.tag())
}

// scanner is used to scan BCP 47 tokens, which are separated by _ or -.
type scanner struct {
	b     []byte
	bytes [max99thPercentileSize]byte
	token []byte
	start int // start position of the current token
	end   int // end position of the current token
	next  int // next point for scan
	err   error
	done  bool
}

func makeScannerString(s string) scanner {
	scan := scanner{}
	if len(s) <= len(scan.bytes) {
		scan.b = scan.bytes[:copy(scan.bytes[:], s)]
	} else {
		scan.b = []byte(s)
	}
	scan.init()
	return scan
}

// makeScanner returns a scanner using b as the input buffer.
// b is not copied and may be modified by the scanner routines.
func makeScanner(b []byte) scanner {
	scan := scanner{b: b}
	scan.init()
	return scan
}

func (s *scanner) init() {
	for i, c := range s.b {
		if c == '_' {
			s.b[i] = '-'
		}
	}
	s.scan()
}

// toLower converts the string between start and end to lower case.
func (s *scanner) toLower(start, end int) {
	for i := start; i < end; i++ {
		c := s.b[i]
		if 'A' <= c && c <= 'Z' {
			s.b[i] += 'a' - 'A'
		}
	}
}

func (s *scanner) setError(e error) {
	if s.err == nil || (e == errSyntax && s.err != errSyntax) {
		s.err = e
	}
}

// resizeRange shrinks or grows the array at position oldStart such that
// a new string of size newSize can fit between oldStart and oldEnd.
// Sets the scan point to after the resized range.
func (s *scanner) resizeRange(oldStart, oldEnd, newSize int) {
	s.start = oldStart
	if end := oldStart + newSize; end != oldEnd {
		diff := end - oldEnd
		if end < cap(s.b) {
			b := make([]byte, len(s.b)+diff)
			copy(b, s.b[:oldStart])
			copy(b[end:], s.b[oldEnd:])
			s.b = b
		} else {
			s.b = append(s.b[end:], s.b[oldEnd:]...)
		}
		s.next = end + (s.next - s.end)
		s.end = end
	}
}

// replace replaces the current token with repl.
func (s *scanner) replace(repl string) {
	s.resizeRange(s.start, s.end, len(repl))
	copy(s.b[s.start:], repl)
}

// gobble removes the current token from the input.
// Caller must call scan after calling gobble.
func (s *scanner) gobble(e error) {
	s.setError(e)
	if s.start == 0 {
		s.b = s.b[:+copy(s.b, s.b[s.next:])]
		s.end = 0
	} else {
		s.b = s.b[:s.start-1+copy(s.b[s.start-1:], s.b[s.end:])]
		s.end = s.start - 1
	}
	s.next = s.start
}

// deleteRange removes the given range from s.b before the current token.
func (s *scanner) deleteRange(start, end int) {
	s.setError(errSyntax)
	s.b = s.b[:start+copy(s.b[start:], s.b[end:])]
	diff := end - start
	s.next -= diff
	s.start -= diff
	s.end -= diff
}

// scan parses the next token of a BCP 47 string. Tokens that are larger
// than 8 characters or include non-alphanumeric characters result in an error
// and are gobbled and removed from the output.
// It returns the end position of the last token consumed.
func (s *scanner) scan() (end int) {
	end = s.end
	s.token = nil
	for s.start = s.next; s.next < len(s.b); {
		i := bytes.IndexByte(s.b[s.next:], '-')
		if i == -1 {
			s.end = len(s.b)
			s.next = len(s.b)
			i = s.end - s.start
		} else {
			s.end = s.next + i
			s.next = s.end + 1
		}
		token := s.b[s.start:s.end]
		if i < 1 || i > 8 || !isAlphaNum(token) {
			s.gobble(errSyntax)
			continue
		}
		s.token = token
		return end
	}
	if n := len(s.b); n > 0 && s.b[n-1] == '-' {
		s.setError(errSyntax)
		s.b = s.b[:len(s.b)-1]
	}
	s.done = true
	return end
}

// acceptMinSize parses multiple tokens of the given size or greater.
// It returns the end position of the last token consumed.
func (s *scanner) acceptMinSize(min int) (end int) {
	end = s.end
	s.scan()
	for ; len(s.token) >= min; s.scan() {
		end = s.end
	}
	return end
}

// Parse parses the given BCP 47 string and returns a valid Tag. If parsing
// failed it returns an error and any part of the tag that could be parsed.
// If parsing succeeded but an unknown value was found, it returns
// ValueError. The Tag returned in this case is just stripped of the unknown
// value. All other values are preserved. It accepts tags in the BCP 47 format
// and extensions to this standard defined in
// http://www.unicode.org/reports/tr35/#Unicode_Language_and_Locale_Identifiers.
// The resulting tag is canonicalized using the default canonicalization type.
func Parse(s string) (t Tag, err error) {
	return Default.Parse(s)
}

// Parse parses the given BCP 47 string and returns a valid Tag. If parsing
// failed it returns an error and any part of the tag that could be parsed.
// If parsing succeeded but an unknown value was found, it returns
// ValueError. The Tag returned in this case is just stripped of the unknown
// value. All other values are preserved. It accepts tags in the BCP 47 format
// and extensions to this standard defined in
// http://www.unicode.org/reports/tr35/#Unicode_Language_and_Locale_Identifiers.
// The resulting tag is canonicalized using the canonicalization type c.
func (c CanonType) Parse(s string) (t Tag, err error) {
	// TODO: consider supporting old-style locale key-value pairs.
	if s == "" {
		return und, errSyntax
	}
	if len(s) <= maxAltTaglen {
		b := [maxAltTaglen]byte{}
		for i, c := range s {
			// Generating invalid UTF-8 is okay as it won't match.
			if 'A' <= c && c <= 'Z' {
				c += 'a' - 'A'
			} else if c == '_' {
				c = '-'
			}
			b[i] = byte(c)
		}
		if t, ok := grandfathered(b); ok {
			return t, nil
		}
	}
	scan := makeScannerString(s)
	t, err = parse(&scan, s)
	t, changed := t.canonicalize(c)
	if changed {
		t.remakeString()
	}
	return t, err
}

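// Editor's sketch (not part of the original file): Parse through the public
// API. The scanner treats '_' as '-' and canonicalization restores subtag
// case, so the expectations below follow from the code above; the inputs are
// illustrative.
//
//	t, err := language.Parse("en_latn_gb")
//	// expected: err == nil and t.String() == "en-Latn-GB"
//	t, err = language.Parse("en-US-x-private")
//	// expected: err == nil; private-use subtags are preserved
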
func parse(scan *scanner, s string) (t Tag, err error) {
	t = und
	var end int
	if n := len(scan.token); n <= 1 {
		scan.toLower(0, len(scan.b))
		if n == 0 || scan.token[0] != 'x' {
			return t, errSyntax
		}
		end = parseExtensions(scan)
	} else if n >= 4 {
		return und, errSyntax
	} else { // the usual case
		t, end = parseTag(scan)
		if n := len(scan.token); n == 1 {
			t.pExt = uint16(end)
			end = parseExtensions(scan)
		} else if end < len(scan.b) {
			scan.setError(errSyntax)
			scan.b = scan.b[:end]
		}
	}
	if int(t.pVariant) < len(scan.b) {
		if end < len(s) {
			s = s[:end]
		}
		if len(s) > 0 && tag.Compare(s, scan.b) == 0 {
			t.str = s
		} else {
			t.str = string(scan.b)
		}
	} else {
		t.pVariant, t.pExt = 0, 0
	}
	return t, scan.err
}

// parseTag parses language, script, region and variants.
// It returns a Tag and the end position in the input that was parsed.
func parseTag(scan *scanner) (t Tag, end int) {
	var e error
	// TODO: set an error if an unknown lang, script or region is encountered.
	t.lang, e = getLangID(scan.token)
	scan.setError(e)
	scan.replace(t.lang.String())
	langStart := scan.start
	end = scan.scan()
	for len(scan.token) == 3 && isAlpha(scan.token[0]) {
		// From http://tools.ietf.org/html/bcp47, <lang>-<extlang> tags are equivalent
		// to a tag of the form <extlang>.
		lang, e := getLangID(scan.token)
		if lang != 0 {
			t.lang = lang
			copy(scan.b[langStart:], lang.String())
			scan.b[langStart+3] = '-'
			scan.start = langStart + 4
		}
		scan.gobble(e)
		end = scan.scan()
	}
	if len(scan.token) == 4 && isAlpha(scan.token[0]) {
		t.script, e = getScriptID(script, scan.token)
		if t.script == 0 {
			scan.gobble(e)
		}
		end = scan.scan()
	}
	if n := len(scan.token); n >= 2 && n <= 3 {
		t.region, e = getRegionID(scan.token)
		if t.region == 0 {
			scan.gobble(e)
		} else {
			scan.replace(t.region.String())
		}
		end = scan.scan()
	}
	scan.toLower(scan.start, len(scan.b))
	t.pVariant = byte(end)
	end = parseVariants(scan, end, t)
	t.pExt = uint16(end)
	return t, end
}

var separator = []byte{'-'}

// parseVariants scans tokens as long as each token is a valid variant string.
// Duplicate variants are removed.
func parseVariants(scan *scanner, end int, t Tag) int {
	start := scan.start
	varIDBuf := [4]uint8{}
	variantBuf := [4][]byte{}
	varID := varIDBuf[:0]
	variant := variantBuf[:0]
	last := -1
	needSort := false
	for ; len(scan.token) >= 4; scan.scan() {
		// TODO: measure the impact of needing this conversion and redesign
		// the data structure if there is an issue.
		v, ok := variantIndex[string(scan.token)]
		if !ok {
			// unknown variant
			// TODO: allow user-defined variants?
			scan.gobble(mkErrInvalid(scan.token))
			continue
		}
		varID = append(varID, v)
		variant = append(variant, scan.token)
		if !needSort {
			if last < int(v) {
				last = int(v)
			} else {
				needSort = true
				// There are no legal combinations of more than 7 variants
				// (and this is by no means a useful sequence).
				const maxVariants = 8
				if len(varID) > maxVariants {
					break
				}
			}
		}
		end = scan.end
	}
	if needSort {
		sort.Sort(variantsSort{varID, variant})
		k, l := 0, -1
		for i, v := range varID {
			w := int(v)
			if l == w {
				// Remove duplicates.
				continue
			}
			varID[k] = varID[i]
			variant[k] = variant[i]
			k++
			l = w
		}
		if str := bytes.Join(variant[:k], separator); len(str) == 0 {
			end = start - 1
		} else {
			scan.resizeRange(start, end, len(str))
			copy(scan.b[scan.start:], str)
			end = scan.end
		}
	}
	return end
}

type variantsSort struct {
	i []uint8
	v [][]byte
}

func (s variantsSort) Len() int {
	return len(s.i)
}

func (s variantsSort) Swap(i, j int) {
	s.i[i], s.i[j] = s.i[j], s.i[i]
	s.v[i], s.v[j] = s.v[j], s.v[i]
}

func (s variantsSort) Less(i, j int) bool {
	return s.i[i] < s.i[j]
}

type bytesSort [][]byte

func (b bytesSort) Len() int {
	return len(b)
}

func (b bytesSort) Swap(i, j int) {
	b[i], b[j] = b[j], b[i]
}

func (b bytesSort) Less(i, j int) bool {
	return bytes.Compare(b[i], b[j]) == -1
}

// parseExtensions parses and normalizes the extensions in the buffer.
// It returns the last position of scan.b that is part of any extension.
// It also trims scan.b to remove excess parts accordingly.
func parseExtensions(scan *scanner) int {
	start := scan.start
	exts := [][]byte{}
	private := []byte{}
	end := scan.end
	for len(scan.token) == 1 {
		extStart := scan.start
		ext := scan.token[0]
		end = parseExtension(scan)
		extension := scan.b[extStart:end]
		if len(extension) < 3 || (ext != 'x' && len(extension) < 4) {
			scan.setError(errSyntax)
			end = extStart
			continue
		} else if start == extStart && (ext == 'x' || scan.start == len(scan.b)) {
			scan.b = scan.b[:end]
			return end
		} else if ext == 'x' {
			private = extension
			break
		}
		exts = append(exts, extension)
	}
	sort.Sort(bytesSort(exts))
	if len(private) > 0 {
		exts = append(exts, private)
	}
	scan.b = scan.b[:start]
	if len(exts) > 0 {
		scan.b = append(scan.b, bytes.Join(exts, separator)...)
	} else if start > 0 {
		// Strip trailing '-'.
		scan.b = scan.b[:start-1]
	}
	return end
}

// parseExtension parses a single extension and returns the position of
// the extension end.
func parseExtension(scan *scanner) int {
	start, end := scan.start, scan.end
	switch scan.token[0] {
	case 'u':
		attrStart := end
		scan.scan()
		for last := []byte{}; len(scan.token) > 2; scan.scan() {
			if bytes.Compare(scan.token, last) != -1 {
				// Attributes are unsorted. Start over from scratch.
				p := attrStart + 1
				scan.next = p
				attrs := [][]byte{}
				for scan.scan(); len(scan.token) > 2; scan.scan() {
					attrs = append(attrs, scan.token)
					end = scan.end
				}
				sort.Sort(bytesSort(attrs))
				copy(scan.b[p:], bytes.Join(attrs, separator))
				break
			}
			last = scan.token
			end = scan.end
		}
		var last, key []byte
		for attrEnd := end; len(scan.token) == 2; last = key {
			key = scan.token
			keyEnd := scan.end
			end = scan.acceptMinSize(3)
			// TODO: check key value validity
			if keyEnd == end || bytes.Compare(key, last) != 1 {
				// We have an invalid key or the keys are not sorted.
				// Start scanning keys from scratch and reorder.
				p := attrEnd + 1
				scan.next = p
				keys := [][]byte{}
				for scan.scan(); len(scan.token) == 2; {
					keyStart, keyEnd := scan.start, scan.end
					end = scan.acceptMinSize(3)
					if keyEnd != end {
						keys = append(keys, scan.b[keyStart:end])
					} else {
						scan.setError(errSyntax)
						end = keyStart
					}
				}
				sort.Sort(bytesSort(keys))
				reordered := bytes.Join(keys, separator)
				if e := p + len(reordered); e < end {
					scan.deleteRange(e, end)
					end = e
				}
				copy(scan.b[p:], bytes.Join(keys, separator))
				break
			}
		}
	case 't':
		scan.scan()
		if n := len(scan.token); n >= 2 && n <= 3 && isAlpha(scan.token[1]) {
			_, end = parseTag(scan)
			scan.toLower(start, end)
		}
		for len(scan.token) == 2 && !isAlpha(scan.token[1]) {
			end = scan.acceptMinSize(3)
		}
	case 'x':
		end = scan.acceptMinSize(1)
	default:
		end = scan.acceptMinSize(2)
	}
	return end
}

// Compose creates a Tag from individual parts, which may be of type Tag, Base,
// Script, Region, Variant, []Variant, Extension, []Extension or error. If a
// Base, Script or Region or slice of type Variant or Extension is passed more
// than once, the latter will overwrite the former. Variants and Extensions are
// accumulated, but if two extensions of the same type are passed, the latter
// will replace the former. A Tag overwrites all former values and typically
// only makes sense as the first argument. The resulting tag is returned after
// canonicalizing using the Default CanonType. If one or more errors are
// encountered, one of the errors is returned.
func Compose(part ...interface{}) (t Tag, err error) {
	return Default.Compose(part...)
}

// Compose creates a Tag from individual parts, which may be of type Tag, Base,
// Script, Region, Variant, []Variant, Extension, []Extension or error. If a
// Base, Script or Region or slice of type Variant or Extension is passed more
// than once, the latter will overwrite the former. Variants and Extensions are
// accumulated, but if two extensions of the same type are passed, the latter
// will replace the former. A Tag overwrites all former values and typically
// only makes sense as the first argument. The resulting tag is returned after
// canonicalizing using CanonType c. If one or more errors are encountered,
// one of the errors is returned.
func (c CanonType) Compose(part ...interface{}) (t Tag, err error) {
	var b builder
	if err = b.update(part...); err != nil {
		return und, err
	}
	t, _ = b.tag.canonicalize(c)

	if len(b.ext) > 0 || len(b.variant) > 0 {
		sort.Sort(sortVariant(b.variant))
		sort.Strings(b.ext)
		if b.private != "" {
			b.ext = append(b.ext, b.private)
		}
		n := maxCoreSize + tokenLen(b.variant...) + tokenLen(b.ext...)
		buf := make([]byte, n)
		p := t.genCoreBytes(buf)
		t.pVariant = byte(p)
		p += appendTokens(buf[p:], b.variant...)
		t.pExt = uint16(p)
		p += appendTokens(buf[p:], b.ext...)
		t.str = string(buf[:p])
	} else if b.private != "" {
		t.str = b.private
		t.remakeString()
	}
	return
}

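// Editor's sketch (not part of the original file): composing a tag from parts
// through the public API; the chosen parts are illustrative.
//
//	base, _ := language.ParseBase("de")
//	region, _ := language.ParseRegion("CH")
//	t, _ := language.Compose(base, region)
//	// expected: t.String() == "de-CH". Passing a second Region in the same
//	// call would overwrite the first, per the doc comment above.
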
type builder struct {
	tag Tag

	private string // the x extension
	ext     []string
	variant []string

	err error
}

func (b *builder) addExt(e string) {
	if e == "" {
	} else if e[0] == 'x' {
		b.private = e
	} else {
		b.ext = append(b.ext, e)
	}
}

var errInvalidArgument = errors.New("invalid Extension or Variant")

func (b *builder) update(part ...interface{}) (err error) {
	replace := func(l *[]string, s string, eq func(a, b string) bool) bool {
		if s == "" {
			b.err = errInvalidArgument
			return true
		}
		for i, v := range *l {
			if eq(v, s) {
				(*l)[i] = s
				return true
			}
		}
		return false
	}
	for _, x := range part {
		switch v := x.(type) {
		case Tag:
			b.tag.lang = v.lang
			b.tag.region = v.region
			b.tag.script = v.script
			if v.str != "" {
				b.variant = nil
				for x, s := "", v.str[v.pVariant:v.pExt]; s != ""; {
					x, s = nextToken(s)
					b.variant = append(b.variant, x)
				}
				b.ext, b.private = nil, ""
				for i, e := int(v.pExt), ""; i < len(v.str); {
					i, e = getExtension(v.str, i)
					b.addExt(e)
				}
			}
		case Base:
			b.tag.lang = v.langID
		case Script:
			b.tag.script = v.scriptID
		case Region:
			b.tag.region = v.regionID
		case Variant:
			if !replace(&b.variant, v.variant, func(a, b string) bool { return a == b }) {
				b.variant = append(b.variant, v.variant)
			}
		case Extension:
			if !replace(&b.ext, v.s, func(a, b string) bool { return a[0] == b[0] }) {
				b.addExt(v.s)
			}
		case []Variant:
			b.variant = nil
			for _, x := range v {
				b.update(x)
			}
		case []Extension:
			b.ext, b.private = nil, ""
			for _, e := range v {
				b.update(e)
			}
		// TODO: support parsing of raw strings based on morphology or just extensions?
		case error:
			err = v
		}
	}
	return
}

func tokenLen(token ...string) (n int) {
	for _, t := range token {
		n += len(t) + 1
	}
	return
}

func appendTokens(b []byte, token ...string) int {
	p := 0
	for _, t := range token {
		b[p] = '-'
		copy(b[p+1:], t)
		p += 1 + len(t)
	}
	return p
}

type sortVariant []string

func (s sortVariant) Len() int {
	return len(s)
}

func (s sortVariant) Swap(i, j int) {
	s[j], s[i] = s[i], s[j]
}

func (s sortVariant) Less(i, j int) bool {
	return variantIndex[s[i]] < variantIndex[s[j]]
}

func findExt(list []string, x byte) int {
	for i, e := range list {
		if e[0] == x {
			return i
		}
	}
	return -1
}

// getExtension returns the name, body and end position of the extension.
func getExtension(s string, p int) (end int, ext string) {
	if s[p] == '-' {
		p++
	}
	if s[p] == 'x' {
		return len(s), s[p:]
	}
	end = nextExtension(s, p)
	return end, s[p:end]
}

// nextExtension finds the next extension within the string, searching
// for the -<char>- pattern from position p.
// In the vast majority of cases, language tags will have at most
// one extension and extensions tend to be small.
func nextExtension(s string, p int) int {
	for n := len(s) - 3; p < n; {
		if s[p] == '-' {
			if s[p+2] == '-' {
				return p
			}
			p += 3
		} else {
			p++
		}
	}
	return len(s)
}

var errInvalidWeight = errors.New("ParseAcceptLanguage: invalid weight")

// ParseAcceptLanguage parses the contents of an Accept-Language header as
// defined in http://www.ietf.org/rfc/rfc2616.txt and returns a list of Tags and
// a list of corresponding quality weights. It is more permissive than RFC 2616
// and may return non-nil slices even if the input is not valid.
// The Tags will be sorted by highest weight first and then by first occurrence.
// Tags with a weight of zero will be dropped. An error will be returned if the
// input could not be parsed.
func ParseAcceptLanguage(s string) (tag []Tag, q []float32, err error) {
	var entry string
	for s != "" {
		if entry, s = split(s, ','); entry == "" {
			continue
		}

		entry, weight := split(entry, ';')

		// Scan the language.
		t, err := Parse(entry)
		if err != nil {
			id, ok := acceptFallback[entry]
			if !ok {
				return nil, nil, err
			}
			t = Tag{lang: id}
		}

		// Scan the optional weight.
		w := 1.0
		if weight != "" {
			weight = consume(weight, 'q')
			weight = consume(weight, '=')
			// consume returns the empty string when a token could not be
			// consumed, resulting in an error for ParseFloat.
			if w, err = strconv.ParseFloat(weight, 32); err != nil {
				return nil, nil, errInvalidWeight
			}
			// Drop tags with a quality weight of 0.
			if w <= 0 {
				continue
			}
		}

		tag = append(tag, t)
		q = append(q, float32(w))
	}
	sortStable(&tagSort{tag, q})
	return tag, q, nil
}

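// Editor's sketch (not part of the original file): a typical call; the header
// value is illustrative.
//
//	tags, weights, err := language.ParseAcceptLanguage("da, en-GB;q=0.8, en;q=0.7")
//	// expected: tags == [da en-GB en], weights == [1 0.8 0.7], err == nil,
//	// already sorted by weight and then by first occurrence.
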
// consume removes a leading token c from s and returns the result or the empty
// string if there is no such token.
func consume(s string, c byte) string {
	if s == "" || s[0] != c {
		return ""
	}
	return strings.TrimSpace(s[1:])
}

func split(s string, c byte) (head, tail string) {
	if i := strings.IndexByte(s, c); i >= 0 {
		return strings.TrimSpace(s[:i]), strings.TrimSpace(s[i+1:])
	}
	return strings.TrimSpace(s), ""
}

// Add hack mapping to deal with a small number of cases that occur
// in Accept-Language (with reasonable frequency).
var acceptFallback = map[string]langID{
	"english": _en,
	"deutsch": _de,
	"italian": _it,
	"french":  _fr,
	"*":       _mul, // defined in the spec to match all languages.
}

type tagSort struct {
	tag []Tag
	q   []float32
}

func (s *tagSort) Len() int {
	return len(s.q)
}

func (s *tagSort) Less(i, j int) bool {
	return s.q[i] > s.q[j]
}

func (s *tagSort) Swap(i, j int) {
	s.tag[i], s.tag[j] = s.tag[j], s.tag[i]
	s.q[i], s.q[j] = s.q[j], s.q[i]
}
3547
vendor/golang.org/x/text/language/tables.go
generated
vendored
File diff suppressed because it is too large
143
vendor/golang.org/x/text/language/tags.go
generated
vendored
@ -1,143 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package language

// TODO: Various sets of commonly used tags and regions.

// MustParse is like Parse, but panics if the given BCP 47 tag cannot be parsed.
// It simplifies safe initialization of Tag values.
func MustParse(s string) Tag {
	t, err := Parse(s)
	if err != nil {
		panic(err)
	}
	return t
}

// MustParse is like Parse, but panics if the given BCP 47 tag cannot be parsed.
// It simplifies safe initialization of Tag values.
func (c CanonType) MustParse(s string) Tag {
	t, err := c.Parse(s)
	if err != nil {
		panic(err)
	}
	return t
}

// MustParseBase is like ParseBase, but panics if the given base cannot be parsed.
// It simplifies safe initialization of Base values.
func MustParseBase(s string) Base {
	b, err := ParseBase(s)
	if err != nil {
		panic(err)
	}
	return b
}

// MustParseScript is like ParseScript, but panics if the given script cannot be
// parsed. It simplifies safe initialization of Script values.
func MustParseScript(s string) Script {
	scr, err := ParseScript(s)
	if err != nil {
		panic(err)
	}
	return scr
}

// MustParseRegion is like ParseRegion, but panics if the given region cannot be
// parsed. It simplifies safe initialization of Region values.
func MustParseRegion(s string) Region {
	r, err := ParseRegion(s)
	if err != nil {
		panic(err)
	}
	return r
}

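// Editor's note (not part of the original file): the Must* helpers above are
// meant for initializing package-level values, e.g. (illustrative):
//
//	var defaultLocale = language.MustParse("en-US")
//	var latin = language.MustParseScript("Latn")
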
var (
|
|
||||||
und = Tag{}
|
|
||||||
|
|
||||||
Und Tag = Tag{}
|
|
||||||
|
|
||||||
Afrikaans Tag = Tag{lang: _af} // af
|
|
||||||
Amharic Tag = Tag{lang: _am} // am
|
|
||||||
Arabic Tag = Tag{lang: _ar} // ar
|
|
||||||
ModernStandardArabic Tag = Tag{lang: _ar, region: _001} // ar-001
|
|
||||||
Azerbaijani Tag = Tag{lang: _az} // az
|
|
||||||
Bulgarian Tag = Tag{lang: _bg} // bg
|
|
||||||
Bengali Tag = Tag{lang: _bn} // bn
|
|
||||||
Catalan Tag = Tag{lang: _ca} // ca
|
|
||||||
Czech Tag = Tag{lang: _cs} // cs
|
|
||||||
Danish Tag = Tag{lang: _da} // da
|
|
||||||
German Tag = Tag{lang: _de} // de
|
|
||||||
Greek Tag = Tag{lang: _el} // el
|
|
||||||
English Tag = Tag{lang: _en} // en
|
|
||||||
AmericanEnglish Tag = Tag{lang: _en, region: _US} // en-US
|
|
||||||
BritishEnglish Tag = Tag{lang: _en, region: _GB} // en-GB
|
|
||||||
Spanish Tag = Tag{lang: _es} // es
|
|
||||||
EuropeanSpanish Tag = Tag{lang: _es, region: _ES} // es-ES
|
|
||||||
LatinAmericanSpanish Tag = Tag{lang: _es, region: _419} // es-419
|
|
||||||
Estonian Tag = Tag{lang: _et} // et
|
|
||||||
Persian Tag = Tag{lang: _fa} // fa
|
|
||||||
Finnish Tag = Tag{lang: _fi} // fi
|
|
||||||
Filipino Tag = Tag{lang: _fil} // fil
|
|
||||||
French Tag = Tag{lang: _fr} // fr
|
|
||||||
CanadianFrench Tag = Tag{lang: _fr, region: _CA} // fr-CA
|
|
||||||
Gujarati Tag = Tag{lang: _gu} // gu
|
|
||||||
Hebrew Tag = Tag{lang: _he} // he
|
|
||||||
Hindi Tag = Tag{lang: _hi} // hi
|
|
||||||
Croatian Tag = Tag{lang: _hr} // hr
|
|
||||||
Hungarian Tag = Tag{lang: _hu} // hu
|
|
||||||
Armenian Tag = Tag{lang: _hy} // hy
|
|
||||||
Indonesian Tag = Tag{lang: _id} // id
|
|
||||||
Icelandic Tag = Tag{lang: _is} // is
|
|
||||||
Italian Tag = Tag{lang: _it} // it
|
|
||||||
Japanese Tag = Tag{lang: _ja} // ja
|
|
||||||
Georgian Tag = Tag{lang: _ka} // ka
|
|
||||||
Kazakh Tag = Tag{lang: _kk} // kk
|
|
||||||
Khmer Tag = Tag{lang: _km} // km
|
|
||||||
Kannada Tag = Tag{lang: _kn} // kn
|
|
||||||
Korean Tag = Tag{lang: _ko} // ko
|
|
||||||
Kirghiz Tag = Tag{lang: _ky} // ky
|
|
||||||
Lao Tag = Tag{lang: _lo} // lo
|
|
||||||
Lithuanian Tag = Tag{lang: _lt} // lt
|
|
||||||
Latvian Tag = Tag{lang: _lv} // lv
|
|
||||||
Macedonian Tag = Tag{lang: _mk} // mk
|
|
||||||
Malayalam Tag = Tag{lang: _ml} // ml
|
|
||||||
Mongolian Tag = Tag{lang: _mn} // mn
|
|
||||||
Marathi Tag = Tag{lang: _mr} // mr
|
|
||||||
Malay Tag = Tag{lang: _ms} // ms
|
|
||||||
Burmese Tag = Tag{lang: _my} // my
|
|
||||||
Nepali Tag = Tag{lang: _ne} // ne
|
|
||||||
Dutch Tag = Tag{lang: _nl} // nl
|
|
||||||
Norwegian Tag = Tag{lang: _no} // no
|
|
||||||
Punjabi Tag = Tag{lang: _pa} // pa
|
|
||||||
Polish Tag = Tag{lang: _pl} // pl
|
|
||||||
Portuguese Tag = Tag{lang: _pt} // pt
|
|
||||||
BrazilianPortuguese Tag = Tag{lang: _pt, region: _BR} // pt-BR
|
|
||||||
EuropeanPortuguese Tag = Tag{lang: _pt, region: _PT} // pt-PT
|
|
||||||
Romanian Tag = Tag{lang: _ro} // ro
|
|
||||||
Russian Tag = Tag{lang: _ru} // ru
|
|
||||||
Sinhala Tag = Tag{lang: _si} // si
|
|
||||||
Slovak Tag = Tag{lang: _sk} // sk
|
|
||||||
Slovenian Tag = Tag{lang: _sl} // sl
|
|
||||||
Albanian Tag = Tag{lang: _sq} // sq
|
|
||||||
Serbian Tag = Tag{lang: _sr} // sr
|
|
||||||
SerbianLatin Tag = Tag{lang: _sr, script: _Latn} // sr-Latn
|
|
||||||
Swedish Tag = Tag{lang: _sv} // sv
|
|
||||||
Swahili Tag = Tag{lang: _sw} // sw
|
|
||||||
Tamil Tag = Tag{lang: _ta} // ta
|
|
||||||
Telugu Tag = Tag{lang: _te} // te
|
|
||||||
Thai Tag = Tag{lang: _th} // th
|
|
||||||
Turkish Tag = Tag{lang: _tr} // tr
|
|
||||||
Ukrainian Tag = Tag{lang: _uk} // uk
|
|
||||||
Urdu Tag = Tag{lang: _ur} // ur
|
|
||||||
Uzbek Tag = Tag{lang: _uz} // uz
|
|
||||||
Vietnamese Tag = Tag{lang: _vi} // vi
|
|
||||||
Chinese Tag = Tag{lang: _zh} // zh
|
|
||||||
SimplifiedChinese Tag = Tag{lang: _zh, script: _Hans} // zh-Hans
|
|
||||||
TraditionalChinese Tag = Tag{lang: _zh, script: _Hant} // zh-Hant
|
|
||||||
Zulu Tag = Tag{lang: _zu} // zu
|
|
||||||
)
|
|
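For reference, a minimal sketch of how the MustParse helpers and tag constants above are typically used; the example and its printed values are an illustration, not part of this commit:

package main

import (
	"fmt"

	"golang.org/x/text/language"
)

func main() {
	// MustParse panics on malformed input, which makes it suitable for
	// package-level initialization of known-good tags.
	ja := language.MustParse("ja")
	fmt.Println(ja)                           // ja
	fmt.Println(language.BrazilianPortuguese) // pt-BR

	// Parse is the non-panicking variant for untrusted input.
	if _, err := language.Parse("not a tag!"); err != nil {
		fmt.Println("invalid tag:", err)
	}
}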
187  vendor/golang.org/x/text/runes/cond.go (generated, vendored)
@@ -1,187 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runes

import (
	"unicode/utf8"

	"golang.org/x/text/transform"
)

// Note: below we pass invalid UTF-8 to the tIn and tNotIn transformers as is.
// This is done for various reasons:
// - To retain the semantics of the Nop transformer: if input is passed to a Nop
//   one would expect it to be unchanged.
// - It would be very expensive to pass a converted RuneError to a transformer:
//   a transformer might need more source bytes after RuneError, meaning that
//   the only way to pass it safely is to create a new buffer and manage the
//   intermingling of RuneErrors and normal input.
// - Many transformers leave ill-formed UTF-8 as is, so this is not
//   inconsistent. Generally ill-formed UTF-8 is only replaced if it is a
//   logical consequence of the operation (as for Map) or if it otherwise would
//   pose security concerns (as for Remove).
// - An alternative would be to return an error on ill-formed UTF-8, but this
//   would be inconsistent with other operations.

// If returns a transformer that applies tIn to consecutive runes for which
// s.Contains(r) and tNotIn to consecutive runes for which !s.Contains(r). Reset
// is called on tIn and tNotIn at the start of each run. A Nop transformer will
// substitute a nil value passed to tIn or tNotIn. Invalid UTF-8 is translated
// to RuneError to determine which transformer to apply, but is passed as is to
// the respective transformer.
func If(s Set, tIn, tNotIn transform.Transformer) Transformer {
	if tIn == nil && tNotIn == nil {
		return Transformer{transform.Nop}
	}
	if tIn == nil {
		tIn = transform.Nop
	}
	if tNotIn == nil {
		tNotIn = transform.Nop
	}
	sIn, ok := tIn.(transform.SpanningTransformer)
	if !ok {
		sIn = dummySpan{tIn}
	}
	sNotIn, ok := tNotIn.(transform.SpanningTransformer)
	if !ok {
		sNotIn = dummySpan{tNotIn}
	}

	a := &cond{
		tIn:    sIn,
		tNotIn: sNotIn,
		f:      s.Contains,
	}
	a.Reset()
	return Transformer{a}
}

type dummySpan struct{ transform.Transformer }

func (d dummySpan) Span(src []byte, atEOF bool) (n int, err error) {
	return 0, transform.ErrEndOfSpan
}

type cond struct {
	tIn, tNotIn transform.SpanningTransformer
	f           func(rune) bool
	check       func(rune) bool               // current check to perform
	t           transform.SpanningTransformer // current transformer to use
}

// Reset implements transform.Transformer.
func (t *cond) Reset() {
	t.check = t.is
	t.t = t.tIn
	t.t.Reset() // notIn will be reset on first usage.
}

func (t *cond) is(r rune) bool {
	if t.f(r) {
		return true
	}
	t.check = t.isNot
	t.t = t.tNotIn
	t.tNotIn.Reset()
	return false
}

func (t *cond) isNot(r rune) bool {
	if !t.f(r) {
		return true
	}
	t.check = t.is
	t.t = t.tIn
	t.tIn.Reset()
	return false
}

// This implementation of Span doesn't help all too much, but it needs to be
// there to satisfy this package's Transformer interface.
// TODO: there is certainly room for improvement, though. For example, if
// t.t == transform.Nop (which will be a common occurrence) it will save a
// bundle to special-case that loop.
func (t *cond) Span(src []byte, atEOF bool) (n int, err error) {
	p := 0
	for n < len(src) && err == nil {
		// Don't process too much at a time as the Spanner that will be
		// called on this block may terminate early.
		const maxChunk = 4096
		max := len(src)
		if v := n + maxChunk; v < max {
			max = v
		}
		atEnd := false
		size := 0
		current := t.t
		for ; p < max; p += size {
			r := rune(src[p])
			if r < utf8.RuneSelf {
				size = 1
			} else if r, size = utf8.DecodeRune(src[p:]); size == 1 {
				if !atEOF && !utf8.FullRune(src[p:]) {
					err = transform.ErrShortSrc
					break
				}
			}
			if !t.check(r) {
				// The next rune will be the start of a new run.
				atEnd = true
				break
			}
		}
		n2, err2 := current.Span(src[n:p], atEnd || (atEOF && p == len(src)))
		n += n2
		if err2 != nil {
			return n, err2
		}
		// At this point either err != nil or t.check will pass for the rune at p.
		p = n + size
	}
	return n, err
}

func (t *cond) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
	p := 0
	for nSrc < len(src) && err == nil {
		// Don't process too much at a time, as the work might be wasted if the
		// destination buffer isn't large enough to hold the result or a
		// transform returns an error early.
		const maxChunk = 4096
		max := len(src)
		if n := nSrc + maxChunk; n < len(src) {
			max = n
		}
		atEnd := false
		size := 0
		current := t.t
		for ; p < max; p += size {
			r := rune(src[p])
			if r < utf8.RuneSelf {
				size = 1
			} else if r, size = utf8.DecodeRune(src[p:]); size == 1 {
				if !atEOF && !utf8.FullRune(src[p:]) {
					err = transform.ErrShortSrc
					break
				}
			}
			if !t.check(r) {
				// The next rune will be the start of a new run.
				atEnd = true
				break
			}
		}
		nDst2, nSrc2, err2 := current.Transform(dst[nDst:], src[nSrc:p], atEnd || (atEOF && p == len(src)))
		nDst += nDst2
		nSrc += nSrc2
		if err2 != nil {
			return nDst, nSrc, err2
		}
		// At this point either err != nil or t.check will pass for the rune at p.
		p = nSrc + size
	}
	return nDst, nSrc, err
}
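For reference, a minimal usage sketch of the If combinator defined above, assuming the import paths shown in the file; the output comment is illustrative, not part of this commit:

package main

import (
	"fmt"
	"unicode"

	"golang.org/x/text/cases"
	"golang.org/x/text/language"
	"golang.org/x/text/runes"
)

func main() {
	// Upper-case runs of Latin-script runes and leave all other runes
	// untouched (the nil tNotIn is substituted by a Nop transformer).
	t := runes.If(runes.In(unicode.Latin), cases.Upper(language.Und), nil)
	fmt.Println(t.String("abcДdef")) // ABCДDEF (the Cyrillic Д is unchanged)
}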
355  vendor/golang.org/x/text/runes/runes.go (generated, vendored)
@@ -1,355 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package runes provides transforms for UTF-8 encoded text.
package runes // import "golang.org/x/text/runes"

import (
	"unicode"
	"unicode/utf8"

	"golang.org/x/text/transform"
)

// A Set is a collection of runes.
type Set interface {
	// Contains returns true if r is contained in the set.
	Contains(r rune) bool
}

type setFunc func(rune) bool

func (s setFunc) Contains(r rune) bool {
	return s(r)
}

// Note: using funcs here instead of wrapping types results in cleaner
// documentation and a smaller API.

// In creates a Set with a Contains method that returns true for all runes in
// the given RangeTable.
func In(rt *unicode.RangeTable) Set {
	return setFunc(func(r rune) bool { return unicode.Is(rt, r) })
}

// NotIn creates a Set with a Contains method that returns true for all runes
// not in the given RangeTable.
func NotIn(rt *unicode.RangeTable) Set {
	return setFunc(func(r rune) bool { return !unicode.Is(rt, r) })
}

// Predicate creates a Set with a Contains method that returns f(r).
func Predicate(f func(rune) bool) Set {
	return setFunc(f)
}

// Transformer implements the transform.Transformer interface.
type Transformer struct {
	t transform.SpanningTransformer
}

func (t Transformer) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
	return t.t.Transform(dst, src, atEOF)
}

func (t Transformer) Span(b []byte, atEOF bool) (n int, err error) {
	return t.t.Span(b, atEOF)
}

func (t Transformer) Reset() { t.t.Reset() }

// Bytes returns a new byte slice with the result of converting b using t. It
// calls Reset on t. It returns nil if any error was found. This can only happen
// if an error-producing Transformer is passed to If.
func (t Transformer) Bytes(b []byte) []byte {
	b, _, err := transform.Bytes(t, b)
	if err != nil {
		return nil
	}
	return b
}

// String returns a string with the result of converting s using t. It calls
// Reset on t. It returns the empty string if any error was found. This can only
// happen if an error-producing Transformer is passed to If.
func (t Transformer) String(s string) string {
	s, _, err := transform.String(t, s)
	if err != nil {
		return ""
	}
	return s
}

// TODO:
// - Copy: copying strings and bytes in whole-rune units.
// - Validation (maybe)
// - Well-formed-ness (maybe)

const runeErrorString = string(utf8.RuneError)

// Remove returns a Transformer that removes runes r for which s.Contains(r).
// Illegal input bytes are replaced by RuneError before being passed to f.
func Remove(s Set) Transformer {
	if f, ok := s.(setFunc); ok {
		// This little trick cuts the running time of BenchmarkRemove for sets
		// created by Predicate roughly in half.
		// TODO: special-case RangeTables as well.
		return Transformer{remove(f)}
	}
	return Transformer{remove(s.Contains)}
}

// TODO: remove transform.RemoveFunc.

type remove func(r rune) bool

func (remove) Reset() {}

// Span implements transform.Spanner.
func (t remove) Span(src []byte, atEOF bool) (n int, err error) {
	for r, size := rune(0), 0; n < len(src); {
		if r = rune(src[n]); r < utf8.RuneSelf {
			size = 1
		} else if r, size = utf8.DecodeRune(src[n:]); size == 1 {
			// Invalid rune.
			if !atEOF && !utf8.FullRune(src[n:]) {
				err = transform.ErrShortSrc
			} else {
				err = transform.ErrEndOfSpan
			}
			break
		}
		if t(r) {
			err = transform.ErrEndOfSpan
			break
		}
		n += size
	}
	return
}

// Transform implements transform.Transformer.
func (t remove) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
	for r, size := rune(0), 0; nSrc < len(src); {
		if r = rune(src[nSrc]); r < utf8.RuneSelf {
			size = 1
		} else if r, size = utf8.DecodeRune(src[nSrc:]); size == 1 {
			// Invalid rune.
			if !atEOF && !utf8.FullRune(src[nSrc:]) {
				err = transform.ErrShortSrc
				break
			}
			// We replace illegal bytes with RuneError. Not doing so might
			// otherwise turn a sequence of invalid UTF-8 into valid UTF-8.
			// The resulting byte sequence may subsequently contain runes
			// for which t(r) is true that were passed unnoticed.
			if !t(utf8.RuneError) {
				if nDst+3 > len(dst) {
					err = transform.ErrShortDst
					break
				}
				dst[nDst+0] = runeErrorString[0]
				dst[nDst+1] = runeErrorString[1]
				dst[nDst+2] = runeErrorString[2]
				nDst += 3
			}
			nSrc++
			continue
		}
		if t(r) {
			nSrc += size
			continue
		}
		if nDst+size > len(dst) {
			err = transform.ErrShortDst
			break
		}
		for i := 0; i < size; i++ {
			dst[nDst] = src[nSrc]
			nDst++
			nSrc++
		}
	}
	return
}

// Map returns a Transformer that maps the runes in the input using the given
// mapping. Illegal bytes in the input are converted to utf8.RuneError before
// being passed to the mapping func.
func Map(mapping func(rune) rune) Transformer {
	return Transformer{mapper(mapping)}
}

type mapper func(rune) rune

func (mapper) Reset() {}

// Span implements transform.Spanner.
func (t mapper) Span(src []byte, atEOF bool) (n int, err error) {
	for r, size := rune(0), 0; n < len(src); n += size {
		if r = rune(src[n]); r < utf8.RuneSelf {
			size = 1
		} else if r, size = utf8.DecodeRune(src[n:]); size == 1 {
			// Invalid rune.
			if !atEOF && !utf8.FullRune(src[n:]) {
				err = transform.ErrShortSrc
			} else {
				err = transform.ErrEndOfSpan
			}
			break
		}
		if t(r) != r {
			err = transform.ErrEndOfSpan
			break
		}
	}
	return n, err
}

// Transform implements transform.Transformer.
func (t mapper) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
	var replacement rune
	var b [utf8.UTFMax]byte

	for r, size := rune(0), 0; nSrc < len(src); {
		if r = rune(src[nSrc]); r < utf8.RuneSelf {
			if replacement = t(r); replacement < utf8.RuneSelf {
				if nDst == len(dst) {
					err = transform.ErrShortDst
					break
				}
				dst[nDst] = byte(replacement)
				nDst++
				nSrc++
				continue
			}
			size = 1
		} else if r, size = utf8.DecodeRune(src[nSrc:]); size == 1 {
			// Invalid rune.
			if !atEOF && !utf8.FullRune(src[nSrc:]) {
				err = transform.ErrShortSrc
				break
			}

			if replacement = t(utf8.RuneError); replacement == utf8.RuneError {
				if nDst+3 > len(dst) {
					err = transform.ErrShortDst
					break
				}
				dst[nDst+0] = runeErrorString[0]
				dst[nDst+1] = runeErrorString[1]
				dst[nDst+2] = runeErrorString[2]
				nDst += 3
				nSrc++
				continue
			}
		} else if replacement = t(r); replacement == r {
			if nDst+size > len(dst) {
				err = transform.ErrShortDst
				break
			}
			for i := 0; i < size; i++ {
				dst[nDst] = src[nSrc]
				nDst++
				nSrc++
			}
			continue
		}

		n := utf8.EncodeRune(b[:], replacement)

		if nDst+n > len(dst) {
			err = transform.ErrShortDst
			break
		}
		for i := 0; i < n; i++ {
			dst[nDst] = b[i]
			nDst++
		}
		nSrc += size
	}
	return
}

// ReplaceIllFormed returns a transformer that replaces all input bytes that are
// not part of a well-formed UTF-8 code sequence with utf8.RuneError.
func ReplaceIllFormed() Transformer {
	return Transformer{&replaceIllFormed{}}
}

type replaceIllFormed struct{ transform.NopResetter }

func (t replaceIllFormed) Span(src []byte, atEOF bool) (n int, err error) {
	for n < len(src) {
		// ASCII fast path.
		if src[n] < utf8.RuneSelf {
			n++
			continue
		}

		r, size := utf8.DecodeRune(src[n:])

		// Look for a valid non-ASCII rune.
		if r != utf8.RuneError || size != 1 {
			n += size
			continue
		}

		// Look for short source data.
		if !atEOF && !utf8.FullRune(src[n:]) {
			err = transform.ErrShortSrc
			break
		}

		// We have an invalid rune.
		err = transform.ErrEndOfSpan
		break
	}
	return n, err
}

func (t replaceIllFormed) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
	for nSrc < len(src) {
		// ASCII fast path.
		if r := src[nSrc]; r < utf8.RuneSelf {
			if nDst == len(dst) {
				err = transform.ErrShortDst
				break
			}
			dst[nDst] = r
			nDst++
			nSrc++
			continue
		}

		// Look for a valid non-ASCII rune.
		if _, size := utf8.DecodeRune(src[nSrc:]); size != 1 {
			if size != copy(dst[nDst:], src[nSrc:nSrc+size]) {
				err = transform.ErrShortDst
				break
			}
			nDst += size
			nSrc += size
			continue
		}

		// Look for short source data.
		if !atEOF && !utf8.FullRune(src[nSrc:]) {
			err = transform.ErrShortSrc
			break
		}

		// We have an invalid rune.
		if nDst+3 > len(dst) {
			err = transform.ErrShortDst
			break
		}
		dst[nDst+0] = runeErrorString[0]
		dst[nDst+1] = runeErrorString[1]
		dst[nDst+2] = runeErrorString[2]
		nDst += 3
		nSrc++
	}
	return nDst, nSrc, err
}
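For reference, a minimal sketch of the Remove and Map transformers defined above, assuming the import paths shown in the file; the output comments are illustrative, not part of this commit:

package main

import (
	"fmt"
	"unicode"

	"golang.org/x/text/runes"
)

func main() {
	// Remove drops every rune contained in the set.
	noSpaces := runes.Remove(runes.In(unicode.White_Space))
	fmt.Println(noSpaces.String("a b\tc")) // abc

	// Map rewrites runes one at a time; ill-formed input is replaced by
	// utf8.RuneError before being passed to the mapping function.
	upper := runes.Map(unicode.ToUpper)
	fmt.Println(upper.String("héllo")) // HÉLLO
}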
36  vendor/golang.org/x/text/secure/precis/class.go (generated, vendored)
@@ -1,36 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package precis

import (
	"unicode/utf8"
)

// TODO: Add contextual character rules from Appendix A of RFC 5892.

// A class is a set of characters that match certain derived properties. The
// PRECIS framework defines two classes: the Freeform class and the Identifier
// class. The freeform class should be used for profiles where expressiveness
// is prioritized over safety, such as nicknames or passwords. The identifier
// class should be used for profiles where safety is the first priority, such
// as addressable network labels and usernames.
type class struct {
	validFrom property
}

// Contains satisfies the runes.Set interface and returns whether the given rune
// is a member of the class.
func (c class) Contains(r rune) bool {
	b := make([]byte, 4)
	n := utf8.EncodeRune(b, r)

	trieval, _ := dpTrie.lookup(b[:n])
	return c.validFrom <= property(trieval)
}

var (
	identifier = &class{validFrom: pValid}
	freeform   = &class{validFrom: idDisOrFreePVal}
)
139  vendor/golang.org/x/text/secure/precis/context.go (generated, vendored)
@@ -1,139 +0,0 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package precis

import "errors"

// This file contains tables and code related to context rules.

type catBitmap uint16

const (
	// These bits, once set depending on the current value, are never unset.
	bJapanese catBitmap = 1 << iota
	bArabicIndicDigit
	bExtendedArabicIndicDigit

	// These bits are set on each iteration depending on the current value.
	bJoinStart
	bJoinMid
	bJoinEnd
	bVirama
	bLatinSmallL
	bGreek
	bHebrew

	// These bits indicate which of the permanent bits need to be set at the
	// end of the checks.
	bMustHaveJapn

	permanent = bJapanese | bArabicIndicDigit | bExtendedArabicIndicDigit | bMustHaveJapn
)

const finalShift = 10

var errContext = errors.New("precis: contextual rule violated")

func init() {
	// Programmatically set these required bits, as manually setting them
	// seems too error-prone.
	for i, ct := range categoryTransitions {
		categoryTransitions[i].keep |= permanent
		categoryTransitions[i].accept |= ct.term
	}
}

var categoryTransitions = []struct {
	keep catBitmap // mask selecting which bits to keep from the previous state
	set  catBitmap // mask for which bits to set for this transition

	// These bitmaps are used for rules that require lookahead.
	// term&accept == term must be true, which is enforced programmatically.
	term   catBitmap // bits accepted as termination condition
	accept catBitmap // bits that pass, but not sufficient as termination

	// The rule function cannot take a *context as an argument, as it would
	// cause the context to escape, adding significant overhead.
	rule func(beforeBits catBitmap) (doLookahead bool, err error)
}{
	joiningL:          {set: bJoinStart},
	joiningD:          {set: bJoinStart | bJoinEnd},
	joiningT:          {keep: bJoinStart, set: bJoinMid},
	joiningR:          {set: bJoinEnd},
	viramaModifier:    {set: bVirama},
	viramaJoinT:       {set: bVirama | bJoinMid},
	latinSmallL:       {set: bLatinSmallL},
	greek:             {set: bGreek},
	greekJoinT:        {set: bGreek | bJoinMid},
	hebrew:            {set: bHebrew},
	hebrewJoinT:       {set: bHebrew | bJoinMid},
	japanese:          {set: bJapanese},
	katakanaMiddleDot: {set: bMustHaveJapn},

	zeroWidthNonJoiner: {
		term:   bJoinEnd,
		accept: bJoinMid,
		rule: func(before catBitmap) (doLookAhead bool, err error) {
			if before&bVirama != 0 {
				return false, nil
			}
			if before&bJoinStart == 0 {
				return false, errContext
			}
			return true, nil
		},
	},
	zeroWidthJoiner: {
		rule: func(before catBitmap) (doLookAhead bool, err error) {
			if before&bVirama == 0 {
				err = errContext
			}
			return false, err
		},
	},
	middleDot: {
		term: bLatinSmallL,
		rule: func(before catBitmap) (doLookAhead bool, err error) {
			if before&bLatinSmallL == 0 {
				return false, errContext
			}
			return true, nil
		},
	},
	greekLowerNumeralSign: {
		set:  bGreek,
		term: bGreek,
		rule: func(before catBitmap) (doLookAhead bool, err error) {
			return true, nil
		},
	},
	hebrewPreceding: {
		set: bHebrew,
		rule: func(before catBitmap) (doLookAhead bool, err error) {
			if before&bHebrew == 0 {
				err = errContext
			}
			return false, err
		},
	},
	arabicIndicDigit: {
		set: bArabicIndicDigit,
		rule: func(before catBitmap) (doLookAhead bool, err error) {
			if before&bExtendedArabicIndicDigit != 0 {
				err = errContext
			}
			return false, err
		},
	},
	extendedArabicIndicDigit: {
		set: bExtendedArabicIndicDigit,
		rule: func(before catBitmap) (doLookAhead bool, err error) {
			if before&bArabicIndicDigit != 0 {
				err = errContext
			}
			return false, err
		},
	},
}
14  vendor/golang.org/x/text/secure/precis/doc.go (generated, vendored)
@@ -1,14 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package precis contains types and functions for the preparation,
// enforcement, and comparison of internationalized strings ("PRECIS") as
// defined in RFC 7564. It also contains several pre-defined profiles for
// passwords, nicknames, and usernames as defined in RFC 7613 and RFC 7700.
//
// BE ADVISED: This package is under construction and the API may change in
// backwards incompatible ways and without notice.
package precis // import "golang.org/x/text/secure/precis"

//go:generate go run gen.go gen_trieval.go
70  vendor/golang.org/x/text/secure/precis/nickname.go (generated, vendored)
@@ -1,70 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package precis

import (
	"unicode"
	"unicode/utf8"

	"golang.org/x/text/transform"
)

type nickAdditionalMapping struct {
	// TODO: This transformer needs to be stateless somehow…
	notStart  bool
	prevSpace bool
}

func (t *nickAdditionalMapping) Reset() {
	t.prevSpace = false
	t.notStart = false
}

func (t *nickAdditionalMapping) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
	// RFC 7700 §2.1. Rules
	//
	// 2. Additional Mapping Rule: The additional mapping rule consists of
	//    the following sub-rules.
	//
	//    1. Any instances of non-ASCII space MUST be mapped to ASCII
	//       space (U+0020); a non-ASCII space is any Unicode code point
	//       having a general category of "Zs", naturally with the
	//       exception of U+0020.
	//
	//    2. Any instances of the ASCII space character at the beginning
	//       or end of a nickname MUST be removed (e.g., "stpeter " is
	//       mapped to "stpeter").
	//
	//    3. Interior sequences of more than one ASCII space character
	//       MUST be mapped to a single ASCII space character (e.g.,
	//       "St  Peter" is mapped to "St Peter").

	for nSrc < len(src) {
		r, size := utf8.DecodeRune(src[nSrc:])
		if size == 0 { // Incomplete UTF-8 encoding
			if !atEOF {
				return nDst, nSrc, transform.ErrShortSrc
			}
			size = 1
		}
		if unicode.Is(unicode.Zs, r) {
			t.prevSpace = true
		} else {
			if t.prevSpace && t.notStart {
				dst[nDst] = ' '
				nDst++
			}
			if size != copy(dst[nDst:], src[nSrc:nSrc+size]) {
				nDst += size
				return nDst, nSrc, transform.ErrShortDst
			}
			nDst += size
			t.prevSpace = false
			t.notStart = true
		}
		nSrc += size
	}
	return nDst, nSrc, nil
}
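For reference, a minimal sketch of the mapping rule above in action, via the Nickname profile defined in profiles.go further down; the output comment is illustrative, not part of this commit:

package main

import (
	"fmt"

	"golang.org/x/text/secure/precis"
)

func main() {
	// Non-ASCII spaces (here U+00A0) are mapped to ASCII space, edge
	// spaces are removed, and interior runs collapse to a single space.
	n, err := precis.Nickname.String("  St\u00a0 Peter  ")
	fmt.Println(n, err) // St Peter <nil>
}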
153  vendor/golang.org/x/text/secure/precis/options.go (generated, vendored)
@@ -1,153 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package precis

import (
	"golang.org/x/text/cases"
	"golang.org/x/text/language"
	"golang.org/x/text/runes"
	"golang.org/x/text/transform"
	"golang.org/x/text/unicode/norm"
)

// An Option is used to define the behavior and rules of a Profile.
type Option func(*options)

type options struct {
	// Preparation options
	foldWidth bool

	// Enforcement options
	asciiLower    bool
	cases         transform.SpanningTransformer
	disallow      runes.Set
	norm          transform.SpanningTransformer
	additional    []func() transform.SpanningTransformer
	width         transform.SpanningTransformer
	disallowEmpty bool
	bidiRule      bool

	// Comparison options
	ignorecase bool
}

func getOpts(o ...Option) (res options) {
	for _, f := range o {
		f(&res)
	}
	// Using a SpanningTransformer instead of norm.Form prevents an allocation
	// down the road.
	if res.norm == nil {
		res.norm = norm.NFC
	}
	return
}

var (
	// The IgnoreCase option causes the profile to perform a case insensitive
	// comparison during the PRECIS comparison step.
	IgnoreCase Option = ignoreCase

	// The FoldWidth option causes the profile to map non-canonical wide and
	// narrow variants to their decomposition mapping. This is useful for
	// profiles that are based on the identifier class which would otherwise
	// disallow such characters.
	FoldWidth Option = foldWidth

	// The DisallowEmpty option causes the enforcement step to return an error if
	// the resulting string would be empty.
	DisallowEmpty Option = disallowEmpty

	// The BidiRule option causes the Bidi Rule defined in RFC 5893 to be
	// applied.
	BidiRule Option = bidiRule
)

var (
	ignoreCase = func(o *options) {
		o.ignorecase = true
	}
	foldWidth = func(o *options) {
		o.foldWidth = true
	}
	disallowEmpty = func(o *options) {
		o.disallowEmpty = true
	}
	bidiRule = func(o *options) {
		o.bidiRule = true
	}
)

// TODO: move this logic to package transform

type spanWrap struct{ transform.Transformer }

func (s spanWrap) Span(src []byte, atEOF bool) (n int, err error) {
	return 0, transform.ErrEndOfSpan
}

// TODO: allow different types? For instance:
//     func() transform.Transformer
//     func() transform.SpanningTransformer
//     func([]byte) bool  // validation only
//
// Also, would be great if we could detect if a transformer is reentrant.

// The AdditionalMapping option defines the additional mapping rule for the
// Profile by applying Transformers in sequence.
func AdditionalMapping(t ...func() transform.Transformer) Option {
	return func(o *options) {
		for _, f := range t {
			sf := func() transform.SpanningTransformer {
				return f().(transform.SpanningTransformer)
			}
			if _, ok := f().(transform.SpanningTransformer); !ok {
				sf = func() transform.SpanningTransformer {
					return spanWrap{f()}
				}
			}
			o.additional = append(o.additional, sf)
		}
	}
}

// The Norm option defines a Profile's normalization rule. Defaults to NFC.
func Norm(f norm.Form) Option {
	return func(o *options) {
		o.norm = f
	}
}

// The FoldCase option defines a Profile's case mapping rule. Options can be
// provided to determine the type of case folding used.
func FoldCase(opts ...cases.Option) Option {
	return func(o *options) {
		o.asciiLower = true
		o.cases = cases.Fold(opts...)
	}
}

// The LowerCase option defines a Profile's case mapping rule. Options can be
// provided to determine the type of case folding used.
func LowerCase(opts ...cases.Option) Option {
	return func(o *options) {
		o.asciiLower = true
		if len(opts) == 0 {
			o.cases = cases.Lower(language.Und, cases.HandleFinalSigma(false))
			return
		}

		opts = append([]cases.Option{cases.HandleFinalSigma(false)}, opts...)
		o.cases = cases.Lower(language.Und, opts...)
	}
}

// The Disallow option further restricts a Profile's allowed characters beyond
// what is disallowed by the underlying string class.
func Disallow(set runes.Set) Option {
	return func(o *options) {
		o.disallow = set
	}
}
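For reference, a minimal sketch of assembling the options above into a custom profile; it mirrors how the UsernameCaseMapped profile in profiles.go is built and uses the NewIdentifier constructor from profile.go below. Illustrative only, not part of this commit:

package main

import (
	"fmt"

	"golang.org/x/text/secure/precis"
	"golang.org/x/text/unicode/norm"
)

func main() {
	// Combine options into a custom identifier-class profile.
	p := precis.NewIdentifier(
		precis.FoldWidth,
		precis.LowerCase(),
		precis.Norm(norm.NFC),
		precis.BidiRule,
	)
	s, err := p.String("Ｕｓｅｒ１") // full-width input
	fmt.Println(s, err)         // user1 <nil>
}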
388  vendor/golang.org/x/text/secure/precis/profile.go (generated, vendored)
@@ -1,388 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package precis

import (
	"bytes"
	"errors"
	"unicode/utf8"

	"golang.org/x/text/cases"
	"golang.org/x/text/language"
	"golang.org/x/text/runes"
	"golang.org/x/text/secure/bidirule"
	"golang.org/x/text/transform"
	"golang.org/x/text/width"
)

var (
	errDisallowedRune = errors.New("precis: disallowed rune encountered")
)

var dpTrie = newDerivedPropertiesTrie(0)

// A Profile represents a set of rules for normalizing and validating strings in
// the PRECIS framework.
type Profile struct {
	options
	class *class
}

// NewIdentifier creates a new PRECIS profile based on the Identifier string
// class. Profiles created from this class are suitable for use where safety is
// prioritized over expressiveness, like network identifiers, user accounts, chat
// rooms, and file names.
func NewIdentifier(opts ...Option) *Profile {
	return &Profile{
		options: getOpts(opts...),
		class:   identifier,
	}
}

// NewFreeform creates a new PRECIS profile based on the Freeform string class.
// Profiles created from this class are suitable for use where expressiveness is
// prioritized over safety, like passwords and display-elements such as
// nicknames in a chat room.
func NewFreeform(opts ...Option) *Profile {
	return &Profile{
		options: getOpts(opts...),
		class:   freeform,
	}
}

// NewTransformer creates a new transform.Transformer that performs the PRECIS
// preparation and enforcement steps on the given UTF-8 encoded bytes.
func (p *Profile) NewTransformer() *Transformer {
	var ts []transform.Transformer

	// These transforms are applied in the order defined in
	// https://tools.ietf.org/html/rfc7564#section-7

	if p.options.foldWidth {
		ts = append(ts, width.Fold)
	}

	for _, f := range p.options.additional {
		ts = append(ts, f())
	}

	if p.options.cases != nil {
		ts = append(ts, p.options.cases)
	}

	ts = append(ts, p.options.norm)

	if p.options.bidiRule {
		ts = append(ts, bidirule.New())
	}

	ts = append(ts, &checker{p: p, allowed: p.Allowed()})

	// TODO: Add the disallow empty rule with a dummy transformer?

	return &Transformer{transform.Chain(ts...)}
}

var errEmptyString = errors.New("precis: transformation resulted in empty string")

type buffers struct {
	src  []byte
	buf  [2][]byte
	next int
}

func (b *buffers) apply(t transform.SpanningTransformer) (err error) {
	n, err := t.Span(b.src, true)
	if err != transform.ErrEndOfSpan {
		return err
	}
	x := b.next & 1
	if b.buf[x] == nil {
		b.buf[x] = make([]byte, 0, 8+len(b.src)+len(b.src)>>2)
	}
	span := append(b.buf[x][:0], b.src[:n]...)
	b.src, _, err = transform.Append(t, span, b.src[n:])
	b.buf[x] = b.src
	b.next++
	return err
}

// Pre-allocate transformers when possible. In some cases this avoids allocation.
var (
	foldWidthT transform.SpanningTransformer = width.Fold
	lowerCaseT transform.SpanningTransformer = cases.Lower(language.Und, cases.HandleFinalSigma(false))
)

// TODO: make this a method on profile.

func (b *buffers) enforce(p *Profile, src []byte, comparing bool) (str []byte, err error) {
	b.src = src

	ascii := true
	for _, c := range src {
		if c >= utf8.RuneSelf {
			ascii = false
			break
		}
	}
	// ASCII fast path.
	if ascii {
		for _, f := range p.options.additional {
			if err = b.apply(f()); err != nil {
				return nil, err
			}
		}
		switch {
		case p.options.asciiLower || (comparing && p.options.ignorecase):
			for i, c := range b.src {
				if 'A' <= c && c <= 'Z' {
					b.src[i] = c ^ 1<<5
				}
			}
		case p.options.cases != nil:
			b.apply(p.options.cases)
		}
		c := checker{p: p}
		if _, err := c.span(b.src, true); err != nil {
			return nil, err
		}
		if p.disallow != nil {
			for _, c := range b.src {
				if p.disallow.Contains(rune(c)) {
					return nil, errDisallowedRune
				}
			}
		}
		if p.options.disallowEmpty && len(b.src) == 0 {
			return nil, errEmptyString
		}
		return b.src, nil
	}

	// These transforms are applied in the order defined in
	// https://tools.ietf.org/html/rfc7564#section-7

	// TODO: allow different width transforms options.
	if p.options.foldWidth || (p.options.ignorecase && comparing) {
		b.apply(foldWidthT)
	}
	for _, f := range p.options.additional {
		if err = b.apply(f()); err != nil {
			return nil, err
		}
	}
	if p.options.cases != nil {
		b.apply(p.options.cases)
	}
	if comparing && p.options.ignorecase {
		b.apply(lowerCaseT)
	}
	b.apply(p.norm)
	if p.options.bidiRule && !bidirule.Valid(b.src) {
		return nil, bidirule.ErrInvalid
	}
	c := checker{p: p}
	if _, err := c.span(b.src, true); err != nil {
		return nil, err
	}
	if p.disallow != nil {
		for i := 0; i < len(b.src); {
			r, size := utf8.DecodeRune(b.src[i:])
			if p.disallow.Contains(r) {
				return nil, errDisallowedRune
			}
			i += size
		}
	}
	if p.options.disallowEmpty && len(b.src) == 0 {
		return nil, errEmptyString
	}
	return b.src, nil
}

// Append appends the result of applying p to src, writing the result to dst.
// It returns an error if the input string is invalid.
func (p *Profile) Append(dst, src []byte) ([]byte, error) {
	var buf buffers
	b, err := buf.enforce(p, src, false)
	if err != nil {
		return nil, err
	}
	return append(dst, b...), nil
}

func processBytes(p *Profile, b []byte, key bool) ([]byte, error) {
	var buf buffers
	b, err := buf.enforce(p, b, key)
	if err != nil {
		return nil, err
	}
	if buf.next == 0 {
		c := make([]byte, len(b))
		copy(c, b)
		return c, nil
	}
	return b, nil
}

// Bytes returns a new byte slice with the result of applying the profile to b.
func (p *Profile) Bytes(b []byte) ([]byte, error) {
	return processBytes(p, b, false)
}

// AppendCompareKey appends the result of applying p to src (including any
// optional rules to make strings comparable or useful in a map key, such as
// applying lowercasing), writing the result to dst. It returns an error if the
// input string is invalid.
func (p *Profile) AppendCompareKey(dst, src []byte) ([]byte, error) {
	var buf buffers
	b, err := buf.enforce(p, src, true)
	if err != nil {
		return nil, err
	}
	return append(dst, b...), nil
}

func processString(p *Profile, s string, key bool) (string, error) {
	var buf buffers
	b, err := buf.enforce(p, []byte(s), key)
	if err != nil {
		return "", err
	}
	return string(b), nil
}

// String returns a string with the result of applying the profile to s.
func (p *Profile) String(s string) (string, error) {
	return processString(p, s, false)
}

// CompareKey returns a string that can be used for comparison, hashing, or
// collation.
func (p *Profile) CompareKey(s string) (string, error) {
	return processString(p, s, true)
}

// Compare enforces both strings, and then compares them for bit-string identity
// (byte-for-byte equality). If either string cannot be enforced, the comparison
// is false.
func (p *Profile) Compare(a, b string) bool {
	var buf buffers

	akey, err := buf.enforce(p, []byte(a), true)
	if err != nil {
		return false
	}

	buf = buffers{}
	bkey, err := buf.enforce(p, []byte(b), true)
	if err != nil {
		return false
	}

	return bytes.Compare(akey, bkey) == 0
}

// Allowed returns a runes.Set containing every rune that is a member of the
// underlying profile's string class and not disallowed by any profile specific
// rules.
func (p *Profile) Allowed() runes.Set {
	if p.options.disallow != nil {
		return runes.Predicate(func(r rune) bool {
			return p.class.Contains(r) && !p.options.disallow.Contains(r)
		})
	}
	return p.class
}

type checker struct {
	p       *Profile
	allowed runes.Set

	beforeBits catBitmap
	termBits   catBitmap
	acceptBits catBitmap
}

func (c *checker) Reset() {
	c.beforeBits = 0
	c.termBits = 0
	c.acceptBits = 0
}

func (c *checker) span(src []byte, atEOF bool) (n int, err error) {
	for n < len(src) {
		e, sz := dpTrie.lookup(src[n:])
		d := categoryTransitions[category(e&catMask)]
		if sz == 0 {
			if !atEOF {
				return n, transform.ErrShortSrc
			}
			return n, errDisallowedRune
		}
		if property(e) < c.p.class.validFrom {
			if d.rule == nil {
				return n, errDisallowedRune
			}
			doLookAhead, err := d.rule(c.beforeBits)
			if err != nil {
				return n, err
			}
			if doLookAhead {
				c.beforeBits &= d.keep
				c.beforeBits |= d.set
				// We may still have a lookahead rule which we will require to
				// complete (by checking termBits == 0) before setting the new
				// bits.
				if c.termBits != 0 && (!c.checkLookahead() || c.termBits == 0) {
					return n, err
				}
				c.termBits = d.term
				c.acceptBits = d.accept
				n += sz
				continue
			}
		}
		c.beforeBits &= d.keep
		c.beforeBits |= d.set
		if c.termBits != 0 && !c.checkLookahead() {
			return n, errContext
		}
		n += sz
	}
	if m := c.beforeBits >> finalShift; c.beforeBits&m != m || c.termBits != 0 {
		err = errContext
	}
	return n, err
}

func (c *checker) checkLookahead() bool {
	switch {
	case c.beforeBits&c.termBits != 0:
		c.termBits = 0
		c.acceptBits = 0
	case c.beforeBits&c.acceptBits != 0:
	default:
		return false
	}
	return true
}

// TODO: we may get rid of this transform if transform.Chain understands
// something like a Spanner interface.
func (c checker) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
	short := false
	if len(dst) < len(src) {
		src = src[:len(dst)]
		atEOF = false
		short = true
	}
	nSrc, err = c.span(src, atEOF)
	nDst = copy(dst, src[:nSrc])
	if short && (err == transform.ErrShortSrc || err == nil) {
		err = transform.ErrShortDst
	}
	return nDst, nSrc, err
}
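For reference, a minimal sketch of the enforcement and comparison entry points defined above, exercised through the UsernameCaseMapped profile from profiles.go below; the output comments are illustrative, not part of this commit:

package main

import (
	"fmt"

	"golang.org/x/text/secure/precis"
)

func main() {
	// CompareKey runs enforcement plus the comparison rules (width
	// folding, lowercasing) and returns a string usable as a map key.
	key, err := precis.UsernameCaseMapped.CompareKey("ＡＬＩＣＥ")
	fmt.Println(key, err) // alice <nil>

	// Compare enforces both inputs and checks byte-for-byte equality.
	fmt.Println(precis.UsernameCaseMapped.Compare("Alice", "ＡＬＩＣＥ")) // true
}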
78  vendor/golang.org/x/text/secure/precis/profiles.go (generated, vendored)
@@ -1,78 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package precis

import (
	"unicode"

	"golang.org/x/text/runes"
	"golang.org/x/text/transform"
	"golang.org/x/text/unicode/norm"
)

var (
	// Implements the Nickname profile specified in RFC 7700.
	// The nickname profile is not idempotent and may need to be applied multiple
	// times before being used for comparisons.
	Nickname *Profile = nickname

	// Implements the UsernameCaseMapped profile specified in RFC 7613.
	UsernameCaseMapped *Profile = usernameCaseMap

	// Implements the UsernameCasePreserved profile specified in RFC 7613.
	UsernameCasePreserved *Profile = usernameNoCaseMap

	// Implements the OpaqueString profile defined in RFC 7613 for passwords and other secure labels.
	OpaqueString *Profile = opaquestring
)

var (
	nickname = &Profile{
		options: getOpts(
			AdditionalMapping(func() transform.Transformer {
				return &nickAdditionalMapping{}
			}),
			IgnoreCase,
			Norm(norm.NFKC),
			DisallowEmpty,
		),
		class: freeform,
	}
	usernameCaseMap = &Profile{
		options: getOpts(
			FoldWidth,
			LowerCase(),
			Norm(norm.NFC),
			BidiRule,
		),
		class: identifier,
	}
	usernameNoCaseMap = &Profile{
		options: getOpts(
			FoldWidth,
			Norm(norm.NFC),
			BidiRule,
		),
		class: identifier,
	}
	opaquestring = &Profile{
		options: getOpts(
			AdditionalMapping(func() transform.Transformer {
				return mapSpaces
			}),
			Norm(norm.NFC),
			DisallowEmpty,
		),
		class: freeform,
	}
)

// mapSpaces is a shared value of a runes.Map transformer.
var mapSpaces transform.Transformer = runes.Map(func(r rune) rune {
	if unicode.Is(unicode.Zs, r) {
		return ' '
	}
	return r
})
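For reference, a minimal sketch of the OpaqueString profile defined above; the output comments are illustrative, not part of this commit:

package main

import (
	"fmt"

	"golang.org/x/text/secure/precis"
)

func main() {
	// OpaqueString (RFC 7613) is meant for passwords: it preserves case,
	// maps non-ASCII spaces to ASCII space, and rejects empty results.
	pw, err := precis.OpaqueString.String("correct\u00a0horse")
	fmt.Println(pw, err) // correct horse <nil>

	if _, err := precis.OpaqueString.String(""); err != nil {
		fmt.Println(err) // precis: transformation resulted in empty string
	}
}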
3788  vendor/golang.org/x/text/secure/precis/tables.go (generated, vendored)
File diff suppressed because it is too large.
Some files were not shown because too many files have changed in this diff.